-rw-r--r--  MAINTAINERS                                |   13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |    5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c       |    8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c      |   16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c        |  390
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h        |    5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c        |   22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   |   23
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c             |   22
-rw-r--r--  drivers/i2c/busses/i2c-omap.c              |   11
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c           |    3
-rw-r--r--  drivers/pci/ats.c                          |    1
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c         |   30
-rw-r--r--  drivers/pci/iov.c                          |    7
-rw-r--r--  drivers/pci/pci.c                          |    9
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c              |    4
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c             |    5
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                   |  116
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c              |    4
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c       |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c            |   27
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c             |    8
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h             |    1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c            |    3
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c            |   14
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c             |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c             |   25
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c              |   42
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h              |    4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c              |   86
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h         |    2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h             |   55
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h              |    8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h            |   16
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c            |  243
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c             |   11
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c              | 1084
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h         |    2
-rw-r--r--  include/scsi/libfcoe.h                     |    3
-rw-r--r--  kernel/events/core.c                       |    8
-rw-r--r--  kernel/sched_fair.c                        |   14
41 files changed, 1943 insertions(+), 411 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index b9db108f01c8..855afd479156 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3101,6 +3101,7 @@ F: include/linux/hid*
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Maintained
 F:	Documentation/timers/
 F:	kernel/hrtimer.c
@@ -3610,7 +3611,7 @@ F: net/irda/
 IRQ SUBSYSTEM
 M:	Thomas Gleixner <tglx@linutronix.de>
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
 
 ISAPNP
@@ -4098,7 +4099,7 @@ F: drivers/hwmon/lm90.c
 LOCKDEP AND LOCKSTAT
 M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:	Maintained
 F:	Documentation/lockdep*.txt
 F:	Documentation/lockstat.txt
@@ -5086,6 +5087,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Supported
 F:	kernel/events/*
 F:	include/linux/perf_event.h
@@ -5165,6 +5167,7 @@ F: drivers/scsi/pm8001/
 
 POSIX CLOCKS and TIMERS
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	fs/timerfd.c
 F:	include/linux/timer*
@@ -5680,6 +5683,7 @@ F: drivers/dma/dw_dmac.c
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
 M:	Thomas Gleixner <tglx@linutronix.de>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:	Supported
 F:	include/linux/clocksource.h
 F:	include/linux/time.h
@@ -5704,6 +5708,7 @@ F: drivers/watchdog/sc1200wdt.c
 SCHEDULER
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Peter Zijlstra <peterz@infradead.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:	Maintained
 F:	kernel/sched*
 F:	include/linux/sched.h
@@ -6631,7 +6636,7 @@ TRACING
 M:	Steven Rostedt <rostedt@goodmis.org>
 M:	Frederic Weisbecker <fweisbec@gmail.com>
 M:	Ingo Molnar <mingo@redhat.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:	Maintained
 F:	Documentation/trace/ftrace.txt
 F:	arch/*/*/*/ftrace.h
@@ -7381,7 +7386,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:	Maintained
 F:	Documentation/x86/
 F:	arch/x86/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bde..dc279706ca70 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
 				   struct ttm_object_file *tfile,
 				   int id,
 				   struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t handle,
+				  struct vmw_surface **out_surf,
+				  struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
 			    struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a7..a0c2f12b1e1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
+	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
 		return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
 
-	hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+	hwversion = ioread32(fifo_mem +
+			     ((fifo->capabilities &
+			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+			      SVGA_FIFO_3D_HWVERSION_REVISED :
+			      SVGA_FIFO_3D_HWVERSION));
+
 	if (hwversion == 0)
 		return false;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ff561d4e0b4..66917c6c3813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
-		param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+		param->value =
+			ioread32(fifo_mem +
+				 ((fifo->capabilities &
+				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				  SVGA_FIFO_3D_HWVERSION_REVISED :
+				  SVGA_FIFO_3D_HWVERSION));
 		break;
 	}
 	default:
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 		ret = -EINVAL;
 		goto out_no_fb;
 	}
-
 	vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-	if (!vfb->dmabuf) {
-		DRM_ERROR("Framebuffer not dmabuf backed.\n");
-		ret = -EINVAL;
-		goto out_no_fb;
-	}
 
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 1748a7142aca..c4bdef3062c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+			int num_rects,
+			struct vmw_clip_rect clip,
+			SVGASignedRect *out_rects,
+			int *out_num)
+{
+	int i, k;
+
+	for (i = 0, k = 0; i < num_rects; i++) {
+		int x1 = max_t(int, clip.x1, rects[i].x1);
+		int y1 = max_t(int, clip.y1, rects[i].y1);
+		int x2 = min_t(int, clip.x2, rects[i].x2);
+		int y2 = min_t(int, clip.y2, rects[i].y2);
+
+		if (x1 >= x2)
+			continue;
+		if (y1 >= y2)
+			continue;
+
+		out_rects[k].left = x1;
+		out_rects[k].top = y1;
+		out_rects[k].right = x2;
+		out_rects[k].bottom = y2;
+		k++;
+	}
+
+	*out_num = k;
+}
+
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
 	if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	void *virtual;
+	bool dummy;
+	int ret;
+
+	kmap_offset = 0;
+	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+				      hotspotX, hotspotY);
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(&dmabuf->base);
+
+	return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		return -EINVAL;
 
 	if (handle) {
-		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-						     handle, &surface);
-		if (!ret) {
-			if (!surface->snooper.image) {
-				DRM_ERROR("surface not suitable for cursor\n");
-				vmw_surface_unreference(&surface);
-				return -EINVAL;
-			}
-		} else {
-			ret = vmw_user_dmabuf_lookup(tfile,
-						     handle, &dmabuf);
-			if (ret) {
-				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-				return -EINVAL;
-			}
+		ret = vmw_user_lookup_handle(dev_priv, tfile,
+					     handle, &surface, &dmabuf);
+		if (ret) {
+			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+			return -EINVAL;
 		}
 	}
 
+	/* need to do this before taking down old image */
+	if (surface && !surface->snooper.image) {
+		DRM_ERROR("surface not suitable for cursor\n");
+		vmw_surface_unreference(&surface);
+		return -EINVAL;
+	}
+
 	/* takedown old cursor */
 	if (du->cursor_surface) {
 		du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		vmw_cursor_update_image(dev_priv, surface->snooper.image,
 					64, 64, du->hotspot_x, du->hotspot_y);
 	} else if (dmabuf) {
-		struct ttm_bo_kmap_obj map;
-		unsigned long kmap_offset;
-		unsigned long kmap_num;
-		void *virtual;
-		bool dummy;
-
 		/* vmw_user_surface_lookup takes one reference */
 		du->cursor_dmabuf = dmabuf;
 
-		kmap_offset = 0;
-		kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("reserve failed\n");
-			return -EINVAL;
-		}
-
-		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-		if (unlikely(ret != 0))
-			goto err_unreserve;
-
-		virtual = ttm_kmap_obj_virtual(&map, &dummy);
-		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-					du->hotspot_x, du->hotspot_y);
-
-		ttm_bo_kunmap(&map);
-err_unreserve:
-		ttm_bo_unreserve(&dmabuf->base);
-
+		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+					       du->hotspot_x, du->hotspot_y);
 	} else {
 		vmw_cursor_update_position(dev_priv, false, 0, 0);
 		return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 			       struct drm_clip_rect *clips,
 			       unsigned num_clips, int inc)
 {
-	struct drm_clip_rect *clips_ptr;
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *clips_ptr;
+	struct drm_clip_rect *tmp;
 	struct drm_crtc *crtc;
 	size_t fifo_size;
 	int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 	} *cmd;
 	SVGASignedRect *blits;
 
-
 	num_units = 0;
 	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
 			    head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 
 	BUG_ON(!clips || !num_clips);
 
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
 	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
 	cmd = kzalloc(fifo_size, GFP_KERNEL);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Temporary fifo memory alloc failed.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_free_tmp;
 	}
 
+	/* setup blits pointer */
+	blits = (SVGASignedRect *)&cmd[1];
+
+	/* initial clip region */
 	left = clips->x1;
 	right = clips->x2;
 	top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 	cmd->body.srcRect.bottom = bottom;
 
 	clips_ptr = clips;
-	blits = (SVGASignedRect *)&cmd[1];
 	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-		blits[i].left = clips_ptr->x1 - left;
-		blits[i].right = clips_ptr->x2 - left;
-		blits[i].top = clips_ptr->y1 - top;
-		blits[i].bottom = clips_ptr->y2 - top;
+		tmp[i].x1 = clips_ptr->x1 - left;
+		tmp[i].x2 = clips_ptr->x2 - left;
+		tmp[i].y1 = clips_ptr->y1 - top;
+		tmp[i].y2 = clips_ptr->y2 - top;
 	}
 
 	/* do per unit writing, reuse fifo for each */
 	for (i = 0; i < num_units; i++) {
 		struct vmw_display_unit *unit = units[i];
-		int clip_x1 = left - unit->crtc.x;
-		int clip_y1 = top - unit->crtc.y;
-		int clip_x2 = right - unit->crtc.x;
-		int clip_y2 = bottom - unit->crtc.y;
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left - unit->crtc.x;
+		clip.y1 = top - unit->crtc.y;
+		clip.x2 = right - unit->crtc.x;
+		clip.y2 = bottom - unit->crtc.y;
 
 		/* skip any crtcs that misses the clip region */
-		if (clip_x1 >= unit->crtc.mode.hdisplay ||
-		    clip_y1 >= unit->crtc.mode.vdisplay ||
-		    clip_x2 <= 0 || clip_y2 <= 0)
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
 			continue;
 
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects needs to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
 		/* need to reset sid as it is changed by execbuf */
 		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
 		cmd->body.destScreenId = unit->unit;
 
-		/*
-		 * The blit command is a lot more resilient then the
-		 * readback command when it comes to clip rects. So its
-		 * okay to go out of bounds.
-		 */
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-		cmd->body.destRect.left = clip_x1;
-		cmd->body.destRect.right = clip_x2;
-		cmd->body.destRect.top = clip_y1;
-		cmd->body.destRect.bottom = clip_y2;
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
 
 
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
 					  fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 			break;
 	}
 
+
 	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
 
 	return ret;
 }
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	 * Sanity checks.
 	 */
 
+	/* Surface must be marked as a scanout. */
+	if (unlikely(!surface->scanout))
+		return -EINVAL;
+
 	if (unlikely(surface->mip_levels[0] != 1 ||
 		     surface->num_sizes != 1 ||
 		     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
 		int clip_y1 = clips_ptr->y1 - unit->crtc.y;
 		int clip_x2 = clips_ptr->x2 - unit->crtc.x;
 		int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+		int move_x, move_y;
 
 		/* skip any crtcs that misses the clip region */
 		if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
 		    clip_x2 <= 0 || clip_y2 <= 0)
 			continue;
 
+		/* clip size to crtc size */
+		clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+		clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+		/* translate both src and dest to bring clip into screen */
+		move_x = min_t(int, clip_x1, 0);
+		move_y = min_t(int, clip_y1, 0);
+
+		/* actual translate done here */
 		blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
 		blits[hit_num].body.destScreenId = unit->unit;
-		blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-		blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-		blits[hit_num].body.destRect.left = clip_x1;
-		blits[hit_num].body.destRect.top = clip_y1;
+		blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+		blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+		blits[hit_num].body.destRect.left = clip_x1 - move_x;
+		blits[hit_num].body.destRect.top = clip_y1 - move_y;
 		blits[hit_num].body.destRect.right = clip_x2;
 		blits[hit_num].body.destRect.bottom = clip_y2;
 		hit_num++;
@@ -1045,42 +1135,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	 * End conditioned code.
 	 */
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-					     mode_cmd.handle, &surface);
+	/* returns either a dmabuf or surface */
+	ret = vmw_user_lookup_handle(dev_priv, tfile,
+				     mode_cmd.handle,
+				     &surface, &bo);
 	if (ret)
-		goto try_dmabuf;
-
-	if (!surface->scanout)
-		goto err_not_scanout;
-
-	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-					      &vfb, &mode_cmd);
-
-	/* vmw_user_surface_lookup takes one ref so does new_fb */
-	vmw_surface_unreference(&surface);
-
-	if (ret) {
-		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-		ttm_base_object_unref(&user_obj);
-		return ERR_PTR(ret);
-	} else
-		vfb->user_obj = user_obj;
-	return &vfb->base;
-
-try_dmabuf:
-	DRM_INFO("%s: trying buffer\n", __func__);
-
-	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd.handle, &bo);
-	if (ret) {
-		DRM_ERROR("failed to find buffer: %i\n", ret);
-		return ERR_PTR(-ENOENT);
-	}
-
-	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-					     &mode_cmd);
+		goto err_out;
+
+	/* Create the new framebuffer depending one what we got back */
+	if (bo)
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+						     &mode_cmd);
+	else if (surface)
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+						      surface, &vfb, &mode_cmd);
+	else
+		BUG();
 
-	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-	vmw_dmabuf_unreference(&bo);
+err_out:
+	/* vmw_user_lookup_handle takes one ref so does new_fb */
+	if (bo)
+		vmw_dmabuf_unreference(&bo);
+	if (surface)
+		vmw_surface_unreference(&surface);
 
 	if (ret) {
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1090,14 +1167,6 @@ try_dmabuf:
 		vfb->user_obj = user_obj;
 
 	return &vfb->base;
-
-err_not_scanout:
-	DRM_ERROR("surface not marked as scanout\n");
-	/* vmw_user_surface_lookup takes one ref */
-	vmw_surface_unreference(&surface);
-	ttm_base_object_unref(&user_obj);
-
-	return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1114,10 +1183,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 			   uint32_t num_clips)
 {
 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *tmp;
 	struct drm_crtc *crtc;
 	size_t fifo_size;
 	int i, k, num_units;
 	int ret = 0; /* silence warning */
+	int left, right, top, bottom;
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -1135,60 +1206,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 	BUG_ON(surface == NULL);
 	BUG_ON(!clips || !num_clips);
 
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
 	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
 	cmd = kmalloc(fifo_size, GFP_KERNEL);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_free_tmp;
+	}
+
+	left = clips->x;
+	right = clips->x + clips->w;
+	top = clips->y;
+	bottom = clips->y + clips->h;
+
+	for (i = 1; i < num_clips; i++) {
+		left = min_t(int, left, (int)clips[i].x);
+		right = max_t(int, right, (int)clips[i].x + clips[i].w);
+		top = min_t(int, top, (int)clips[i].y);
+		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
 	}
 
 	/* only need to do this once */
 	memset(cmd, 0, fifo_size);
 	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-	cmd->body.srcRect.left = 0;
-	cmd->body.srcRect.right = surface->sizes[0].width;
-	cmd->body.srcRect.top = 0;
-	cmd->body.srcRect.bottom = surface->sizes[0].height;
 
 	blits = (SVGASignedRect *)&cmd[1];
+
+	cmd->body.srcRect.left = left;
+	cmd->body.srcRect.right = right;
+	cmd->body.srcRect.top = top;
+	cmd->body.srcRect.bottom = bottom;
+
 	for (i = 0; i < num_clips; i++) {
-		blits[i].left = clips[i].x;
-		blits[i].right = clips[i].x + clips[i].w;
-		blits[i].top = clips[i].y;
-		blits[i].bottom = clips[i].y + clips[i].h;
+		tmp[i].x1 = clips[i].x - left;
+		tmp[i].x2 = clips[i].x + clips[i].w - left;
+		tmp[i].y1 = clips[i].y - top;
+		tmp[i].y2 = clips[i].y + clips[i].h - top;
 	}
 
 	for (k = 0; k < num_units; k++) {
 		struct vmw_display_unit *unit = units[k];
-		int clip_x1 = destX - unit->crtc.x;
-		int clip_y1 = destY - unit->crtc.y;
-		int clip_x2 = clip_x1 + surface->sizes[0].width;
-		int clip_y2 = clip_y1 + surface->sizes[0].height;
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left + destX - unit->crtc.x;
+		clip.y1 = top + destY - unit->crtc.y;
+		clip.x2 = right + destX - unit->crtc.x;
+		clip.y2 = bottom + destY - unit->crtc.y;
 
 		/* skip any crtcs that misses the clip region */
-		if (clip_x1 >= unit->crtc.mode.hdisplay ||
-		    clip_y1 >= unit->crtc.mode.vdisplay ||
-		    clip_x2 <= 0 || clip_y2 <= 0)
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
 			continue;
 
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects needs to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
 		/* need to reset sid as it is changed by execbuf */
 		cmd->body.srcImage.sid = sid;
-
 		cmd->body.destScreenId = unit->unit;
 
-		/*
-		 * The blit command is a lot more resilient then the
-		 * readback command when it comes to clip rects. So its
-		 * okay to go out of bounds.
-		 */
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-		cmd->body.destRect.left = clip_x1;
-		cmd->body.destRect.right = clip_x2;
-		cmd->body.destRect.top = clip_y1;
-		cmd->body.destRect.bottom = clip_y2;
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
 
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
 					  fifo_size, 0, NULL);
 
@@ -1197,6 +1303,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 	}
 
 	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 055b844bd80f..a4f7f034996a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -63,9 +63,14 @@ struct vmw_framebuffer {
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
 			    u32 *image, u32 width, u32 height,
 			    u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 15a6805e48b0..f77b184be807 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
 	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
 	struct vmw_legacy_display_unit *entry;
+	struct vmw_display_unit *du = NULL;
 	struct drm_framebuffer *fb = NULL;
 	struct drm_crtc *crtc = NULL;
-	int i = 0;
+	int i = 0, ret;
 
 	/* If there is no display topology the host just assumes
 	 * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
 	lds->last_num_active = lds->num_active;
 
+
+	/* Find the first du with a cursor. */
+	list_for_each_entry(entry, &lds->active, active) {
+		du = &entry->base;
+
+		if (!du->cursor_dmabuf)
+			continue;
+
+		ret = vmw_cursor_update_dmabuf(dev_priv,
+					       du->cursor_dmabuf,
+					       64, 64,
+					       du->hotspot_x,
+					       du->hotspot_y);
+		if (ret == 0)
+			break;
+
+		DRM_ERROR("Could not update cursor image\n");
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 2eb84a55aee7..a37abb581cbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
 	write_unlock(lock);
 }
 
+/**
+ * Helper function that looks either a surface or dmabuf.
+ *
+ * The pointer this pointed at by out_surf and out_buf needs to be null.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   struct vmw_surface **out_surf,
+			   struct vmw_dma_buffer **out_buf)
+{
+	int ret;
+
+	BUG_ON(*out_surf || *out_buf);
+
+	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+	if (!ret)
+		return 0;
+
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
 				   struct ttm_object_file *tfile,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aeac..18936ac9d51c 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 	/* Set the number of I2C channel instance */
 	adap_info->ch_num = id->driver_data;
 
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+			  KBUILD_MODNAME, adap_info);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
 	for (i = 0; i < adap_info->ch_num; i++) {
 		pch_adap = &adap_info->pch_data[i].pch_adapter;
 		adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
 		pch_adap->dev.parent = &pdev->dev;
 
+		pch_i2c_init(&adap_info->pch_data[i]);
 		ret = i2c_add_adapter(pch_adap);
 		if (ret) {
 			pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-			goto err_i2c_add_adapter;
+			goto err_add_adapter;
 		}
-
-		pch_i2c_init(&adap_info->pch_data[i]);
-	}
-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-		  KBUILD_MODNAME, adap_info);
-	if (ret) {
-		pch_pci_err(pdev, "request_irq FAILED\n");
-		goto err_i2c_add_adapter;
 	}
 
 	pci_set_drvdata(pdev, adap_info);
 	pch_pci_dbg(pdev, "returns %d.\n", ret);
 	return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
 	for (j = 0; j < i; j++)
 		i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+	free_irq(pdev->irq, adap_info);
+err_request_irq:
 	pci_iounmap(pdev, base_addr);
 err_pci_iomap:
 	pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446a..fa23faa20f0e 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
 	 * size. This is to ensure that we can handle the status on int
 	 * call back latencies.
 	 */
-	if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-		dev->fifo_size = 0;
+
+	dev->fifo_size = (dev->fifo_size / 2);
+
+	if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
 		dev->b_hw = 0; /* Disable hardware fixes */
-	} else {
-		dev->fifo_size = (dev->fifo_size / 2);
+	else
 		dev->b_hw = 1; /* Enable hardware fixes */
-	}
+
 	/* calculate wakeup latency constraint for MPU */
 	if (dev->set_mpu_wkup_lat != NULL)
 		dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a06..4c1718081685 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 
 	/* first, try busy waiting briefly */
 	do {
+		cpu_relax();
 		iicstat = readl(i2c->regs + S3C2410_IICSTAT);
 	} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-	return -EINVAL;
+	return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd78..b0dd08e6a9da 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54a0c8d..9ddf69e3bbef 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 	if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
 		return AE_OK;
 
+	pdev = pbus->self;
+	if (pdev && pci_is_pcie(pdev)) {
+		tmp = acpi_find_root_bridge_handle(pdev);
+		if (tmp) {
+			struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+			if (root && (root->osc_control_set &
+					OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+				return AE_OK;
+		}
+	}
+
 	acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
 	device = (adr >> 16) & 0xffff;
 	function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
 	pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
 	if (pdev) {
-		pdev->current_state = PCI_D0;
 		slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
 		pci_dev_put(pdev);
 	}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
 {
 	acpi_status status;
 	unsigned long long tmp;
-	struct acpi_pci_root *root;
 	acpi_handle dummy_handle;
 
-	/*
-	 * We shouldn't use this bridge if PCIe native hotplug control has been
-	 * granted by the BIOS for it.
-	 */
-	root = acpi_pci_find_root(handle);
-	if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-		return -ENODEV;
-
 	/* if the bridge doesn't have _STA, we assume it is always there */
 	status = acpi_get_handle(handle, "_STA", &dummy_handle);
 	if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-	struct acpi_pci_root *root;
 	int *count = (int *)context;
 
 	if (!acpi_is_root_bridge(handle))
 		return AE_OK;
 
-	root = acpi_pci_find_root(handle);
-	if (!root)
-		return AE_OK;
-
-	if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-		return AE_OK;
-
 	(*count)++;
 	acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
 				    handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b37..1969a3ee3058 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 	struct resource *res;
 	struct pci_dev *pdev;
 	struct pci_sriov *iov = dev->sriov;
+	int bars = 0;
 
 	if (!nr_virtfn)
 		return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
 	nres = 0;
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+		bars |= (1 << (i + PCI_IOV_RESOURCES));
 		res = dev->resource + PCI_IOV_RESOURCES + i;
 		if (res->parent)
 			nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 		return -ENOMEM;
 	}
 
+	if (pci_enable_resources(dev, bars)) {
+		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+		return -ENOMEM;
+	}
+
 	if (iov->link != dev->devfn) {
 		pdev = pci_get_slot(dev->bus, iov->link);
 		if (!pdev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9f..6d4a5319148d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 		error = platform_pci_set_power_state(dev, state);
 		if (!error)
 			pci_update_current_state(dev, state);
+		/* Fall back to PCI_D0 if native PM is not supported */
+		if (!dev->pm_cap)
+			dev->current_state = PCI_D0;
 	} else {
 		error = -ENODEV;
 		/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
 	if (atomic_add_return(1, &dev->enable_cnt) > 1)
 		return 0; /* already enabled */
 
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+	/* only skip sriov related */
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+		if (dev->resource[i].flags & flags)
+			bars |= (1 << i);
+	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
 		if (dev->resource[i].flags & flags)
 			bars |= (1 << i);
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f888223..b79576b64f45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+	/* if previous slave_alloc returned early, there is nothing to do */
+	if (!zfcp_sdev->port)
+		return;
+
 	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
 	put_device(&zfcp_sdev->port->dev);
 }
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1c..1ad0b8225560 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
 	spin_lock(&session->lock);
 	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-	if (!task) {
+	if (!task || !task->sc) {
 		spin_unlock(&session->lock);
 		return -EINVAL;
 	}
 	sc = task->sc;
-	spin_unlock(&session->lock);
 
 	if (!blk_rq_cpu_valid(sc->request))
 		cpu = smp_processor_id();
 	else
 		cpu = sc->request->cpu;
 
+	spin_unlock(&session->lock);
+
 	p = &per_cpu(bnx2i_percpu, cpu);
 	spin_lock(&p->p_work_lock);
 	if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84a..8d67467dd9ce 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
 			   unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+				     ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
 	.notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+	.notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb_reset_network_header(skb);
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
+	skb->priority = port->priority;
+
 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
 	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
 		skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
 	stats->InvalidCRCCount++;
 	if (stats->InvalidCRCCount < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+	put_cpu();
 	return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
  */
 static void fcoe_dev_setup(void)
 {
+	register_dcbevent_notifier(&dcb_notifier);
 	register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
  */
 static void fcoe_dev_cleanup(void)
 {
+	unregister_dcbevent_notifier(&dcb_notifier);
 	unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *real_dev;
+
+	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+		if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+			real_dev = vlan_dev_real_dev(fcoe->netdev);
+		else
+			real_dev = fcoe->netdev;
+
+		if (netdev == real_dev)
+			return fcoe;
+	}
+	return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+				     ulong event, void *ptr)
+{
+	struct dcb_app_type *entry = ptr;
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	struct fcoe_port *port;
+	int prio;
+
+	if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+		return NOTIFY_OK;
+
+	netdev = dev_get_by_index(&init_net, entry->ifindex);
+	if (!netdev)
+		return NOTIFY_OK;
+
+	fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+	dev_put(netdev);
+	if (!fcoe)
+		return NOTIFY_OK;
+
+	if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+		prio = ffs(entry->app.priority) - 1;
+	else
+		prio = entry->app.priority;
+
+	if (prio < 0)
+		return NOTIFY_OK;
+
+	if (entry->app.protocol == ETH_P_FIP ||
+	    entry->app.protocol == ETH_P_FCOE)
+		fcoe->ctlr.priority = prio;
+
+	if (entry->app.protocol == ETH_P_FCOE) {
+		port = lport_priv(fcoe->ctlr.lp);
+		port->priority = prio;
+	}
+
+	return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
 }
 
 /**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+	int dcbx;
+	u8 fup, up;
+	struct net_device *netdev = fcoe->realdev;
+	struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+	struct dcb_app app = {
+				.priority = 0,
+				.protocol = ETH_P_FCOE
+			     };
+
+	/* setup DCB priority attributes. */
+	if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+		if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+			app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+			up = dcb_ieee_getapp_mask(netdev, &app);
+			app.protocol = ETH_P_FIP;
+			fup = dcb_ieee_getapp_mask(netdev, &app);
+		} else {
+			app.selector = DCB_APP_IDTYPE_ETHTYPE;
+			up = dcb_getapp(netdev, &app);
+			app.protocol = ETH_P_FIP;
+			fup = dcb_getapp(netdev, &app);
+		}
+
+		port->priority = ffs(up) ? ffs(up) - 1 : 0;
+		fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+	}
+#endif
+}
+
+/**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
  * @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2007 /* Make this the "master" N_Port */ 2120 /* Make this the "master" N_Port */
2008 fcoe->ctlr.lp = lport; 2121 fcoe->ctlr.lp = lport;
2009 2122
2123 /* setup DCB priority attributes. */
2124 fcoe_dcb_create(fcoe);
2125
2010 /* add to lports list */ 2126 /* add to lports list */
2011 fcoe_hostlist_add(lport); 2127 fcoe_hostlist_add(lport);
2012 2128
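
Both the CEE branch of fcoe_dcb_app_notification() and fcoe_dcb_create() above turn a DCB APP priority bitmask into a single 802.1p priority by taking the lowest set bit with ffs(), falling back to 0 when no bit is set. A minimal user-space sketch of that conversion (illustration only, not driver code; ffs() here is the POSIX routine from <strings.h>):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Lowest set bit of the priority mask, or 0 when the mask is empty. */
static int mask_to_prio(unsigned int mask)
{
	return ffs(mask) ? ffs(mask) - 1 : 0;
}

int main(void)
{
	printf("mask 0x%02x -> prio %d\n", 0x08u, mask_to_prio(0x08)); /* bit 3 set -> 3 */
	printf("mask 0x%02x -> prio %d\n", 0x00u, mask_to_prio(0x00)); /* empty mask -> 0 */
	return 0;
}

fcoe_dcb_create() applies the same conversion to the masks returned by dcb_ieee_getapp_mask()/dcb_getapp() for ETH_P_FCOE and ETH_P_FIP.
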
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71ef..e7522dcc296e 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
320 320
321 skb_put(skb, sizeof(*sol)); 321 skb_put(skb, sizeof(*sol));
322 skb->protocol = htons(ETH_P_FIP); 322 skb->protocol = htons(ETH_P_FIP);
323 skb->priority = fip->priority;
323 skb_reset_mac_header(skb); 324 skb_reset_mac_header(skb);
324 skb_reset_network_header(skb); 325 skb_reset_network_header(skb);
325 fip->send(fip, skb); 326 fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
474 } 475 }
475 skb_put(skb, len); 476 skb_put(skb, len);
476 skb->protocol = htons(ETH_P_FIP); 477 skb->protocol = htons(ETH_P_FIP);
478 skb->priority = fip->priority;
477 skb_reset_mac_header(skb); 479 skb_reset_mac_header(skb);
478 skb_reset_network_header(skb); 480 skb_reset_network_header(skb);
479 fip->send(fip, skb); 481 fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
566 cap->fip.fip_dl_len = htons(dlen / FIP_BPW); 568 cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
567 569
568 skb->protocol = htons(ETH_P_FIP); 570 skb->protocol = htons(ETH_P_FIP);
571 skb->priority = fip->priority;
569 skb_reset_mac_header(skb); 572 skb_reset_mac_header(skb);
570 skb_reset_network_header(skb); 573 skb_reset_network_header(skb);
571 return 0; 574 return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
1911 1914
1912 skb_put(skb, len); 1915 skb_put(skb, len);
1913 skb->protocol = htons(ETH_P_FIP); 1916 skb->protocol = htons(ETH_P_FIP);
1917 skb->priority = fip->priority;
1914 skb_reset_mac_header(skb); 1918 skb_reset_mac_header(skb);
1915 skb_reset_network_header(skb); 1919 skb_reset_network_header(skb);
1916 1920
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808c..d570573b7963 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4335 /* insert into event log */ 4335 /* insert into event log */
4336 sz = offsetof(Mpi2EventNotificationReply_t, EventData) + 4336 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
4337 sizeof(Mpi2EventDataSasDeviceStatusChange_t); 4337 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
4338 event_reply = kzalloc(sz, GFP_KERNEL); 4338 event_reply = kzalloc(sz, GFP_ATOMIC);
4339 if (!event_reply) { 4339 if (!event_reply) {
4340 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 4340 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4341 ioc->name, __FILE__, __LINE__, __func__); 4341 ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e931..6465dae5883a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
1762 scsi_qla_host_t *vha = shost_priv(shost); 1762 scsi_qla_host_t *vha = shost_priv(shost);
1763 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); 1763 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1764 1764
1765 if (!base_vha->flags.online) 1765 if (!base_vha->flags.online) {
1766 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1766 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1767 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT) 1767 return;
1768 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 1768 }
1769 else 1769
1770 switch (atomic_read(&base_vha->loop_state)) {
1771 case LOOP_UPDATE:
1772 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1773 break;
1774 case LOOP_DOWN:
1775 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
1776 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1777 else
1778 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1779 break;
1780 case LOOP_DEAD:
1781 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1782 break;
1783 case LOOP_READY:
1770 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 1784 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1785 break;
1786 default:
1787 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1788 break;
1789 }
1771} 1790}
1772 1791
1773static int 1792static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c0..f3cddd5800c3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | | 14 * | Module Init and Probe | 0x0116 | |
15 * | Mailbox commands | 0x1129 | | 15 * | Mailbox commands | 0x112b | |
16 * | Device Discovery | 0x2083 | | 16 * | Device Discovery | 0x2083 | |
17 * | Queue Command and IO tracing | 0x302e | 0x3008 | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 |
18 * | DPC Thread | 0x401c | | 18 * | DPC Thread | 0x401c | |
19 * | Async Events | 0x5059 | | 19 * | Async Events | 0x5059 | |
20 * | Timer Routines | 0x600d | | 20 * | Timer Routines | 0x6010 | 0x600e,0x600f |
21 * | User Space Interactions | 0x709d | | 21 * | User Space Interactions | 0x709d | |
22 * | Task Management | 0x8041 | | 22 * | Task Management | 0x8041 | 0x800b |
23 * | AER/EEH | 0x900f | | 23 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | | 24 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb051 | | 25 * | ISP82XX Specific | 0xb052 | |
26 * | MultiQ | 0xc00b | | 26 * | MultiQ | 0xc00b | |
27 * | Misc | 0xd00b | | 27 * | Misc | 0xd00b | |
28 * ---------------------------------------------------------------------- 28 * ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9e..c0c11afb685c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); 578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
579extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); 579extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
580extern char *qdev_state(uint32_t); 580extern char *qdev_state(uint32_t);
581extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
581 582
582/* BSG related functions */ 583/* BSG related functions */
583extern int qla24xx_bsg_request(struct fc_bsg_job *); 584extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f1877..54ea68cec4c5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
1509 &ha->fw_xcb_count, NULL, NULL, 1509 &ha->fw_xcb_count, NULL, NULL,
1510 &ha->max_npiv_vports, NULL); 1510 &ha->max_npiv_vports, NULL);
1511 1511
1512 if (!fw_major_version && ql2xallocfwdump) 1512 if (!fw_major_version && ql2xallocfwdump
1513 && !IS_QLA82XX(ha))
1513 qla2x00_alloc_fw_dump(vha); 1514 qla2x00_alloc_fw_dump(vha);
1514 } 1515 }
1515 } else { 1516 } else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0f..a4b267e60a35 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 121 */
122static inline cont_a64_entry_t * 122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) 123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
124{ 124{
125 cont_a64_entry_t *cont_pkt; 125 cont_a64_entry_t *cont_pkt;
126 126
127 struct req_que *req = vha->req;
128 /* Adjust ring index. */ 127 /* Adjust ring index. */
129 req->ring_index++; 128 req->ring_index++;
130 if (req->ring_index == req->length) { 129 if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
292 * Five DSDs are available in the Continuation 291 * Five DSDs are available in the Continuation
293 * Type 1 IOCB. 292 * Type 1 IOCB.
294 */ 293 */
295 cont_pkt = qla2x00_prep_cont_type1_iocb(vha); 294 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
296 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 295 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
297 avail_dsds = 5; 296 avail_dsds = 5;
298 } 297 }
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
684 * Five DSDs are available in the Continuation 683 * Five DSDs are available in the Continuation
685 * Type 1 IOCB. 684 * Type 1 IOCB.
686 */ 685 */
687 cont_pkt = qla2x00_prep_cont_type1_iocb(vha); 686 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
688 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 687 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
689 avail_dsds = 5; 688 avail_dsds = 5;
690 } 689 }
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2070 * Five DSDs are available in the Cont. 2069 * Five DSDs are available in the Cont.
2071 * Type 1 IOCB. 2070 * Type 1 IOCB.
2072 */ 2071 */
2073 cont_pkt = qla2x00_prep_cont_type1_iocb(vha); 2072 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2073 vha->hw->req_q_map[0]);
2074 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2074 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2075 avail_dsds = 5; 2075 avail_dsds = 5;
2076 cont_iocb_prsnt = 1; 2076 cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2096 int index; 2096 int index;
2097 uint16_t tot_dsds; 2097 uint16_t tot_dsds;
2098 scsi_qla_host_t *vha = sp->fcport->vha; 2098 scsi_qla_host_t *vha = sp->fcport->vha;
2099 struct qla_hw_data *ha = vha->hw;
2099 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2100 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2100 int loop_iterartion = 0; 2101 int loop_iterartion = 0;
2101 int cont_iocb_prsnt = 0; 2102 int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2141 * Five DSDs are available in the Cont. 2142 * Five DSDs are available in the Cont.
2142 * Type 1 IOCB. 2143 * Type 1 IOCB.
2143 */ 2144 */
2144 cont_pkt = qla2x00_prep_cont_type1_iocb(vha); 2145 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2146 ha->req_q_map[0]);
2145 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2147 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2146 avail_dsds = 5; 2148 avail_dsds = 5;
2147 cont_iocb_prsnt = 1; 2149 cont_iocb_prsnt = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aeea..7b91b290ffd6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1741 resid, scsi_bufflen(cp)); 1741 resid, scsi_bufflen(cp));
1742 1742
1743 cp->result = DID_ERROR << 16 | lscsi_status; 1743 cp->result = DID_ERROR << 16 | lscsi_status;
1744 break; 1744 goto check_scsi_status;
1745 } 1745 }
1746 1746
1747 if (!lscsi_status && 1747 if (!lscsi_status &&
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac2..82a33533ed26 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
79 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004, 80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
82 rval = QLA_FUNCTION_FAILED; 82 return QLA_FUNCTION_TIMEOUT;
83 goto premature_exit;
84 } 83 }
85 84
86 /* 85 /*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
163 HINT_MBX_INT_PENDING) { 162 HINT_MBX_INT_PENDING) {
164 spin_unlock_irqrestore(&ha->hardware_lock, 163 spin_unlock_irqrestore(&ha->hardware_lock,
165 flags); 164 flags);
165 ha->flags.mbox_busy = 0;
166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010, 166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
167 "Pending mailbox timeout, exiting.\n"); 167 "Pending mailbox timeout, exiting.\n");
168 rval = QLA_FUNCTION_TIMEOUT; 168 rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
188 HINT_MBX_INT_PENDING) { 188 HINT_MBX_INT_PENDING) {
189 spin_unlock_irqrestore(&ha->hardware_lock, 189 spin_unlock_irqrestore(&ha->hardware_lock,
190 flags); 190 flags);
191 ha->flags.mbox_busy = 0;
191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012, 192 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
192 "Pending mailbox timeout, exiting.\n"); 193 "Pending mailbox timeout, exiting.\n");
193 rval = QLA_FUNCTION_TIMEOUT; 194 rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 303 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 304 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 305 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
305 306 if (IS_QLA82XX(ha)) {
307 ql_dbg(ql_dbg_mbx, vha, 0x112a,
308 "disabling pause transmit on port "
309 "0 & 1.\n");
310 qla82xx_wr_32(ha,
311 QLA82XX_CRB_NIU + 0x98,
312 CRB_NIU_XG_PAUSE_CTL_P0|
313 CRB_NIU_XG_PAUSE_CTL_P1);
314 }
306 ql_log(ql_log_info, base_vha, 0x101c, 315 ql_log(ql_log_info, base_vha, 0x101c,
307 "Mailbox cmd timeout occured. " 316 "Mailbox cmd timeout occured. "
308 "Scheduling ISP abort eeh_busy=0x%x.\n", 317 "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 327 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && 328 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 329 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
321 330 if (IS_QLA82XX(ha)) {
331 ql_dbg(ql_dbg_mbx, vha, 0x112b,
332 "disabling pause transmit on port "
333 "0 & 1.\n");
334 qla82xx_wr_32(ha,
335 QLA82XX_CRB_NIU + 0x98,
336 CRB_NIU_XG_PAUSE_CTL_P0|
337 CRB_NIU_XG_PAUSE_CTL_P1);
338 }
322 ql_log(ql_log_info, base_vha, 0x101e, 339 ql_log(ql_log_info, base_vha, 0x101e,
323 "Mailbox cmd timeout occured. " 340 "Mailbox cmd timeout occured. "
324 "Scheduling ISP abort.\n"); 341 "Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce4..03554934b0a5 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3817,6 +3817,20 @@ exit:
3817 return rval; 3817 return rval;
3818} 3818}
3819 3819
3820void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3821{
3822 struct qla_hw_data *ha = vha->hw;
3823
3824 if (ha->flags.mbox_busy) {
3825 ha->flags.mbox_int = 1;
3826 ha->flags.mbox_busy = 0;
3827 ql_log(ql_log_warn, vha, 0x6010,
3828 "Doing premature completion of mbx command.\n");
3829 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
3830 complete(&ha->mbx_intr_comp);
3831 }
3832}
3833
3820void qla82xx_watchdog(scsi_qla_host_t *vha) 3834void qla82xx_watchdog(scsi_qla_host_t *vha)
3821{ 3835{
3822 uint32_t dev_state, halt_status; 3836 uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3839 qla2xxx_wake_dpc(vha); 3853 qla2xxx_wake_dpc(vha);
3840 } else { 3854 } else {
3841 if (qla82xx_check_fw_alive(vha)) { 3855 if (qla82xx_check_fw_alive(vha)) {
3856 ql_dbg(ql_dbg_timer, vha, 0x6011,
3857 "disabling pause transmit on port 0 & 1.\n");
3858 qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
3859 CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
3842 halt_status = qla82xx_rd_32(ha, 3860 halt_status = qla82xx_rd_32(ha,
3843 QLA82XX_PEG_HALT_STATUS1); 3861 QLA82XX_PEG_HALT_STATUS1);
3844 ql_dbg(ql_dbg_timer, vha, 0x6005, 3862 ql_log(ql_log_info, vha, 0x6005,
3845 "dumping hw/fw registers:.\n " 3863 "dumping hw/fw registers:.\n "
3846 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n " 3864 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
3847 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n " 3865 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3858 QLA82XX_CRB_PEG_NET_3 + 0x3c), 3876 QLA82XX_CRB_PEG_NET_3 + 0x3c),
3859 qla82xx_rd_32(ha, 3877 qla82xx_rd_32(ha,
3860 QLA82XX_CRB_PEG_NET_4 + 0x3c)); 3878 QLA82XX_CRB_PEG_NET_4 + 0x3c));
3879 if (LSW(MSB(halt_status)) == 0x67)
3880 ql_log(ql_log_warn, vha, 0xb052,
3881 "Firmware aborted with "
3882 "error code 0x00006700. Device is "
3883 "being reset.\n");
3861 if (halt_status & HALT_STATUS_UNRECOVERABLE) { 3884 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3862 set_bit(ISP_UNRECOVERABLE, 3885 set_bit(ISP_UNRECOVERABLE,
3863 &vha->dpc_flags); 3886 &vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3869 } 3892 }
3870 qla2xxx_wake_dpc(vha); 3893 qla2xxx_wake_dpc(vha);
3871 ha->flags.isp82xx_fw_hung = 1; 3894 ha->flags.isp82xx_fw_hung = 1;
3872 if (ha->flags.mbox_busy) { 3895 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
3873 ha->flags.mbox_int = 1; 3896 qla82xx_clear_pending_mbx(vha);
3874 ql_log(ql_log_warn, vha, 0x6007,
3875 "Due to FW hung, doing "
3876 "premature completion of mbx "
3877 "command.\n");
3878 if (test_bit(MBX_INTR_WAIT,
3879 &ha->mbx_cmd_flags))
3880 complete(&ha->mbx_intr_comp);
3881 }
3882 } 3897 }
3883 } 3898 }
3884 } 3899 }
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
4073 msleep(1000); 4088 msleep(1000);
4074 if (qla82xx_check_fw_alive(vha)) { 4089 if (qla82xx_check_fw_alive(vha)) {
4075 ha->flags.isp82xx_fw_hung = 1; 4090 ha->flags.isp82xx_fw_hung = 1;
4076 if (ha->flags.mbox_busy) { 4091 qla82xx_clear_pending_mbx(vha);
4077 ha->flags.mbox_int = 1;
4078 complete(&ha->mbx_intr_comp);
4079 }
4080 break; 4092 break;
4081 } 4093 }
4082 } 4094 }
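
qla82xx_clear_pending_mbx() above releases a mailbox caller that is blocked on ha->mbx_intr_comp once the firmware is declared hung, so the caller is woken immediately instead of waiting out the full mailbox timeout for an interrupt that will never arrive. A rough user-space analogue of that "premature completion" pattern, with a pthread condition variable standing in for the kernel completion (illustration only, not driver code; the names mirror the driver's flags but are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  intr = PTHREAD_COND_INITIALIZER;
static bool mbox_busy = true;	/* a mailbox command is outstanding    */
static bool mbox_done = false;	/* the "completion" has been signalled */

/* Watchdog path: firmware looks hung, complete the waiter prematurely. */
static void *watchdog(void *unused)
{
	(void)unused;
	sleep(1);			/* pretend the hang was just detected */
	pthread_mutex_lock(&lock);
	if (mbox_busy) {
		mbox_busy = false;
		mbox_done = true;	/* qla82xx_clear_pending_mbx() analogue */
		pthread_cond_signal(&intr);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, watchdog, NULL);

	/* Mailbox caller: block until the interrupt (or the watchdog) wakes us. */
	pthread_mutex_lock(&lock);
	while (!mbox_done)
		pthread_cond_wait(&intr, &lock);
	pthread_mutex_unlock(&lock);

	puts("mailbox completed prematurely, caller returns a timeout");
	pthread_join(tid, NULL);
	return 0;
}
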
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc2..57a226be339a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
1173 1173
1174static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 1174static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1175 0x410000B8, 0x410000BC }; 1175 0x410000B8, 0x410000BC };
1176
1177#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1178#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
1179
1176#endif 1180#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc626..f9e5b85e84d8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
201 "Set the Minidump driver capture mask level. " 201 "Set the Minidump driver capture mask level. "
202 "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); 202 "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
203 203
204int ql2xmdenable; 204int ql2xmdenable = 1;
205module_param(ql2xmdenable, int, S_IRUGO); 205module_param(ql2xmdenable, int, S_IRUGO);
206MODULE_PARM_DESC(ql2xmdenable, 206MODULE_PARM_DESC(ql2xmdenable,
207 "Enable/disable MiniDump. " 207 "Enable/disable MiniDump. "
208 "0 (Default) - MiniDump disabled. " 208 "0 - MiniDump disabled. "
209 "1 - MiniDump enabled."); 209 "1 (Default) - MiniDump enabled.");
210 210
211/* 211/*
212 * SCSI host template entry points 212 * SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
423 qla25xx_delete_queues(vha); 423 qla25xx_delete_queues(vha);
424 destroy_workqueue(ha->wq); 424 destroy_workqueue(ha->wq);
425 ha->wq = NULL; 425 ha->wq = NULL;
426 vha->req = ha->req_q_map[0];
426fail: 427fail:
427 ha->mqenable = 0; 428 ha->mqenable = 0;
428 kfree(ha->req_q_map); 429 kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
814 return return_status; 815 return return_status;
815} 816}
816 817
817/*
818 * qla2x00_wait_for_loop_ready
819 * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
820 * to be in LOOP_READY state.
821 * Input:
822 * ha - pointer to host adapter structure
823 *
824 * Note:
825 * Does context switching-Release SPIN_LOCK
826 * (if any) before calling this routine.
827 *
828 *
829 * Return:
830 * Success (LOOP_READY) : 0
831 * Failed (LOOP_NOT_READY) : 1
832 */
833static inline int
834qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
835{
836 int return_status = QLA_SUCCESS;
837 unsigned long loop_timeout ;
838 struct qla_hw_data *ha = vha->hw;
839 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
840
841 /* wait for 5 min at the max for loop to be ready */
842 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
843
844 while ((!atomic_read(&base_vha->loop_down_timer) &&
845 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
846 atomic_read(&base_vha->loop_state) != LOOP_READY) {
847 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
848 return_status = QLA_FUNCTION_FAILED;
849 break;
850 }
851 msleep(1000);
852 if (time_after_eq(jiffies, loop_timeout)) {
853 return_status = QLA_FUNCTION_FAILED;
854 break;
855 }
856 }
857 return (return_status);
858}
859
860static void 818static void
861sp_get(struct srb *sp) 819sp_get(struct srb *sp)
862{ 820{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1035 "Wait for hba online failed for cmd=%p.\n", cmd); 993 "Wait for hba online failed for cmd=%p.\n", cmd);
1036 goto eh_reset_failed; 994 goto eh_reset_failed;
1037 } 995 }
1038 err = 1;
1039 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
1040 ql_log(ql_log_warn, vha, 0x800b,
1041 "Wait for loop ready failed for cmd=%p.\n", cmd);
1042 goto eh_reset_failed;
1043 }
1044 err = 2; 996 err = 2;
1045 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) 997 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
1046 != QLA_SUCCESS) { 998 != QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1137 goto eh_bus_reset_done; 1089 goto eh_bus_reset_done;
1138 } 1090 }
1139 1091
1140 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) { 1092 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
1141 if (qla2x00_loop_reset(vha) == QLA_SUCCESS) 1093 ret = SUCCESS;
1142 ret = SUCCESS; 1094
1143 }
1144 if (ret == FAILED) 1095 if (ret == FAILED)
1145 goto eh_bus_reset_done; 1096 goto eh_bus_reset_done;
1146 1097
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1206 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) 1157 if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
1207 goto eh_host_reset_lock; 1158 goto eh_host_reset_lock;
1208 1159
1209 /*
1210 * Fixme-may be dpc thread is active and processing
1211 * loop_resync,so wait a while for it to
1212 * be completed and then issue big hammer.Otherwise
1213 * it may cause I/O failure as big hammer marks the
1214 * devices as lost kicking of the port_down_timer
1215 * while dpc is stuck for the mailbox to complete.
1216 */
1217 qla2x00_wait_for_loop_ready(vha);
1218 if (vha != base_vha) { 1160 if (vha != base_vha) {
1219 if (qla2x00_vp_abort_isp(vha)) 1161 if (qla2x00_vp_abort_isp(vha))
1220 goto eh_host_reset_lock; 1162 goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1297 atomic_set(&vha->loop_state, LOOP_DOWN); 1239 atomic_set(&vha->loop_state, LOOP_DOWN);
1298 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1240 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1299 qla2x00_mark_all_devices_lost(vha, 0); 1241 qla2x00_mark_all_devices_lost(vha, 0);
1300 qla2x00_wait_for_loop_ready(vha);
1301 } 1242 }
1302 1243
1303 if (ha->flags.enable_lip_reset) { 1244 if (ha->flags.enable_lip_reset) {
1304 ret = qla2x00_lip_reset(vha); 1245 ret = qla2x00_lip_reset(vha);
1305 if (ret != QLA_SUCCESS) { 1246 if (ret != QLA_SUCCESS)
1306 ql_dbg(ql_dbg_taskm, vha, 0x802e, 1247 ql_dbg(ql_dbg_taskm, vha, 0x802e,
1307 "lip_reset failed (%d).\n", ret); 1248 "lip_reset failed (%d).\n", ret);
1308 } else
1309 qla2x00_wait_for_loop_ready(vha);
1310 } 1249 }
1311 1250
1312 /* Issue marker command only when we are going to start the I/O */ 1251 /* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4070 /* For ISP82XX complete any pending mailbox cmd */ 4009 /* For ISP82XX complete any pending mailbox cmd */
4071 if (IS_QLA82XX(ha)) { 4010 if (IS_QLA82XX(ha)) {
4072 ha->flags.isp82xx_fw_hung = 1; 4011 ha->flags.isp82xx_fw_hung = 1;
4073 if (ha->flags.mbox_busy) { 4012 ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
4074 ha->flags.mbox_int = 1; 4013 qla82xx_clear_pending_mbx(vha);
4075 ql_dbg(ql_dbg_aer, vha, 0x9001,
4076 "Due to pci channel io frozen, doing premature "
4077 "completion of mbx command.\n");
4078 complete(&ha->mbx_intr_comp);
4079 }
4080 } 4014 }
4081 qla2x00_free_irqs(vha); 4015 qla2x00_free_irqs(vha);
4082 pci_disable_device(pdev); 4016 pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa2..23f33a6d52d7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.07.07-k" 10#define QLA2XXX_VERSION "8.03.07.12-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254e..fd5edc6e166d 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
147#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */ 147#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
148#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */ 148#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
149 149
150#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */ 150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
151 /* recovery timeout */ 151 /* recovery timeout */
152 152
153#define LSDW(x) ((u32)((u64)(x))) 153#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
173#define ISNS_DEREG_TOV 5 173#define ISNS_DEREG_TOV 5
174#define HBA_ONLINE_TOV 30 174#define HBA_ONLINE_TOV 30
175#define DISABLE_ACB_TOV 30 175#define DISABLE_ACB_TOV 30
176#define IP_CONFIG_TOV 30
177#define LOGIN_TOV 12
176 178
177#define MAX_RESET_HA_RETRIES 2 179#define MAX_RESET_HA_RETRIES 2
178 180
@@ -240,6 +242,45 @@ struct ddb_entry {
240 242
241 uint16_t fw_ddb_index; /* DDB firmware index */ 243 uint16_t fw_ddb_index; /* DDB firmware index */
242 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ 244 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
245 uint16_t ddb_type;
246#define FLASH_DDB 0x01
247
248 struct dev_db_entry fw_ddb_entry;
249 int (*unblock_sess)(struct iscsi_cls_session *cls_session);
250 int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
251 struct ddb_entry *ddb_entry, uint32_t state);
252
253 /* Driver Re-login */
254 unsigned long flags; /* DDB Flags */
255 uint16_t default_relogin_timeout; /* Max time to wait for
256 * relogin to complete */
257 atomic_t retry_relogin_timer; /* Min Time between relogins
258 * (4000 only) */
259 atomic_t relogin_timer; /* Max Time to wait for
260 * relogin to complete */
261 atomic_t relogin_retry_count; /* Num of times relogin has been
262 * retried */
263 uint32_t default_time2wait; /* Default Min time between
264 * relogins (+aens) */
265
266};
267
268struct qla_ddb_index {
269 struct list_head list;
270 uint16_t fw_ddb_idx;
271 struct dev_db_entry fw_ddb;
272};
273
274#define DDB_IPADDR_LEN 64
275
276struct ql4_tuple_ddb {
277 int port;
278 int tpgt;
279 char ip_addr[DDB_IPADDR_LEN];
280 char iscsi_name[ISCSI_NAME_SIZE];
281 uint16_t options;
282#define DDB_OPT_IPV6 0x0e0e
283#define DDB_OPT_IPV4 0x0f0f
243}; 284};
244 285
245/* 286/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
411#define AF_FW_RECOVERY 19 /* 0x00080000 */ 452#define AF_FW_RECOVERY 19 /* 0x00080000 */
412#define AF_EEH_BUSY 20 /* 0x00100000 */ 453#define AF_EEH_BUSY 20 /* 0x00100000 */
413#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ 454#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
414 455#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
415 unsigned long dpc_flags; 456 unsigned long dpc_flags;
416 457
417#define DPC_RESET_HA 1 /* 0x00000002 */ 458#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
604 uint16_t bootload_minor; 645 uint16_t bootload_minor;
605 uint16_t bootload_patch; 646 uint16_t bootload_patch;
606 uint16_t bootload_build; 647 uint16_t bootload_build;
648 uint16_t def_timeout; /* Default login timeout */
607 649
608 uint32_t flash_state; 650 uint32_t flash_state;
609#define QLFLASH_WAITING 0 651#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
623 uint16_t iscsi_pci_func_cnt; 665 uint16_t iscsi_pci_func_cnt;
624 uint8_t model_name[16]; 666 uint8_t model_name[16];
625 struct completion disable_acb_comp; 667 struct completion disable_acb_comp;
668 struct dma_pool *fw_ddb_dma_pool;
669#define DDB_DMA_BLOCK_SIZE 512
670 uint16_t pri_ddb_idx;
671 uint16_t sec_ddb_idx;
672 int is_reset;
626}; 673};
627 674
628struct ql4_task_data { 675struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
835/*---------------------------------------------------------------------------*/ 882/*---------------------------------------------------------------------------*/
836 883
837/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */ 884/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
885
886#define INIT_ADAPTER 0
887#define RESET_ADAPTER 1
888
838#define PRESERVE_DDB_LIST 0 889#define PRESERVE_DDB_LIST 0
839#define REBUILD_DDB_LIST 1 890#define REBUILD_DDB_LIST 1
840 891
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd1..4ac07f882521 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
12#define MAX_PRST_DEV_DB_ENTRIES 64 12#define MAX_PRST_DEV_DB_ENTRIES 64
13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES 13#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
14#define MAX_DEV_DB_ENTRIES 512 14#define MAX_DEV_DB_ENTRIES 512
15#define MAX_DEV_DB_ENTRIES_40XX 256
15 16
16/************************************************************************* 17/*************************************************************************
17 * 18 *
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
604 uint8_t res14[140]; /* 274-2FF */ 605 uint8_t res14[140]; /* 274-2FF */
605}; 606};
606 607
 608#define IP_ADDR_COUNT 4 /* Total of 4 IP addresses supported per interface:
 609 * one IPv4, one IPv6 link-local and 2 IPv6
610 */
611
612#define IP_STATE_MASK 0x0F000000
613#define IP_STATE_SHIFT 24
614
607struct init_fw_ctrl_blk { 615struct init_fw_ctrl_blk {
608 struct addr_ctrl_blk pri; 616 struct addr_ctrl_blk pri;
609/* struct addr_ctrl_blk sec;*/ 617/* struct addr_ctrl_blk sec;*/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea21..d0dd4b330206 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
13int qla4xxx_hw_reset(struct scsi_qla_host *ha); 13int qla4xxx_hw_reset(struct scsi_qla_host *ha);
14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); 14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
15int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb); 15int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
16int qla4xxx_initialize_adapter(struct scsi_qla_host *ha); 16int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
17int qla4xxx_soft_reset(struct scsi_qla_host *ha); 17int qla4xxx_soft_reset(struct scsi_qla_host *ha);
18irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id); 18irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
19 19
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
153 uint32_t *mbx_sts); 153 uint32_t *mbx_sts);
154int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index); 154int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
155int qla4xxx_send_passthru0(struct iscsi_task *task); 155int qla4xxx_send_passthru0(struct iscsi_task *task);
156void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
156int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index, 157int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
157 uint16_t stats_size, dma_addr_t stats_dma); 158 uint16_t stats_size, dma_addr_t stats_dma);
158void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 159void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
159 struct ddb_entry *ddb_entry); 160 struct ddb_entry *ddb_entry);
161void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
162 struct ddb_entry *ddb_entry);
160int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha, 163int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
161 struct dev_db_entry *fw_ddb_entry, 164 struct dev_db_entry *fw_ddb_entry,
162 dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index); 165 dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
169int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha, 172int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
170 uint32_t region, uint32_t field0, 173 uint32_t region, uint32_t field0,
171 uint32_t field1); 174 uint32_t field1);
175int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
176void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
177int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
178int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
179int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
180 struct ddb_entry *ddb_entry, uint32_t state);
181int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
182 struct ddb_entry *ddb_entry, uint32_t state);
183void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
172 184
173/* BSG Functions */ 185/* BSG Functions */
174int qla4xxx_bsg_request(struct bsg_job *bsg_job); 186int qla4xxx_bsg_request(struct bsg_job *bsg_job);
175int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); 187int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
176 188
189void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
190
177extern int ql4xextended_error_logging; 191extern int ql4xextended_error_logging;
178extern int ql4xdontresethba; 192extern int ql4xdontresethba;
179extern int ql4xenablemsix; 193extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef553..1bdfa8120ac8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
773 * be freed so that when login happens from user space there are free DDB 773 * be freed so that when login happens from user space there are free DDB
774 * indices available. 774 * indices available.
775 **/ 775 **/
776static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha) 776void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
777{ 777{
778 int max_ddbs; 778 int max_ddbs;
779 int ret; 779 int ret;
780 uint32_t idx = 0, next_idx = 0; 780 uint32_t idx = 0, next_idx = 0;
781 uint32_t state = 0, conn_err = 0; 781 uint32_t state = 0, conn_err = 0;
782 782
783 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : 783 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
784 MAX_DEV_DB_ENTRIES; 784 MAX_DEV_DB_ENTRIES;
785 785
786 for (idx = 0; idx < max_ddbs; idx = next_idx) { 786 for (idx = 0; idx < max_ddbs; idx = next_idx) {
787 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, 787 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
788 &next_idx, &state, &conn_err, 788 &next_idx, &state, &conn_err,
789 NULL, NULL); 789 NULL, NULL);
790 if (ret == QLA_ERROR) 790 if (ret == QLA_ERROR) {
791 next_idx++;
791 continue; 792 continue;
793 }
792 if (state == DDB_DS_NO_CONNECTION_ACTIVE || 794 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
793 state == DDB_DS_SESSION_FAILED) { 795 state == DDB_DS_SESSION_FAILED) {
794 DEBUG2(ql4_printk(KERN_INFO, ha, 796 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
804 } 806 }
805} 807}
806 808
807
808/** 809/**
809 * qla4xxx_initialize_adapter - initiailizes hba 810 * qla4xxx_initialize_adapter - initiailizes hba
810 * @ha: Pointer to host adapter structure. 811 * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
812 * This routine parforms all of the steps necessary to initialize the adapter. 813 * This routine parforms all of the steps necessary to initialize the adapter.
813 * 814 *
814 **/ 815 **/
815int qla4xxx_initialize_adapter(struct scsi_qla_host *ha) 816int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
816{ 817{
817 int status = QLA_ERROR; 818 int status = QLA_ERROR;
818 819
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
840 if (status == QLA_ERROR) 841 if (status == QLA_ERROR)
841 goto exit_init_hba; 842 goto exit_init_hba;
842 843
843 qla4xxx_free_ddb_index(ha); 844 if (is_reset == RESET_ADAPTER)
845 qla4xxx_build_ddb_list(ha, is_reset);
844 846
845 set_bit(AF_ONLINE, &ha->flags); 847 set_bit(AF_ONLINE, &ha->flags);
846exit_init_hba: 848exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
855 return status; 857 return status;
856} 858}
857 859
858/** 860int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
859 * qla4xxx_process_ddb_changed - process ddb state change 861 struct ddb_entry *ddb_entry, uint32_t state)
860 * @ha - Pointer to host adapter structure.
861 * @fw_ddb_index - Firmware's device database index
862 * @state - Device state
863 *
864 * This routine processes a Decive Database Changed AEN Event.
865 **/
866int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
867 uint32_t state, uint32_t conn_err)
868{ 862{
869 struct ddb_entry * ddb_entry;
870 uint32_t old_fw_ddb_device_state; 863 uint32_t old_fw_ddb_device_state;
871 int status = QLA_ERROR; 864 int status = QLA_ERROR;
872 865
873 /* check for out of range index */
874 if (fw_ddb_index >= MAX_DDB_ENTRIES)
875 goto exit_ddb_event;
876
877 /* Get the corresponging ddb entry */
878 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
879 /* Device does not currently exist in our database. */
880 if (ddb_entry == NULL) {
881 ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
882 __func__, fw_ddb_index);
883
884 if (state == DDB_DS_NO_CONNECTION_ACTIVE)
885 clear_bit(fw_ddb_index, ha->ddb_idx_map);
886
887 goto exit_ddb_event;
888 }
889
890 old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; 866 old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
891 DEBUG2(ql4_printk(KERN_INFO, ha, 867 DEBUG2(ql4_printk(KERN_INFO, ha,
892 "%s: DDB - old state = 0x%x, new state = 0x%x for " 868 "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
900 switch (state) { 876 switch (state) {
901 case DDB_DS_SESSION_ACTIVE: 877 case DDB_DS_SESSION_ACTIVE:
902 case DDB_DS_DISCOVERY: 878 case DDB_DS_DISCOVERY:
903 iscsi_conn_start(ddb_entry->conn); 879 ddb_entry->unblock_sess(ddb_entry->sess);
904 iscsi_conn_login_event(ddb_entry->conn,
905 ISCSI_CONN_STATE_LOGGED_IN);
906 qla4xxx_update_session_conn_param(ha, ddb_entry); 880 qla4xxx_update_session_conn_param(ha, ddb_entry);
907 status = QLA_SUCCESS; 881 status = QLA_SUCCESS;
908 break; 882 break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
936 switch (state) { 910 switch (state) {
937 case DDB_DS_SESSION_ACTIVE: 911 case DDB_DS_SESSION_ACTIVE:
938 case DDB_DS_DISCOVERY: 912 case DDB_DS_DISCOVERY:
939 iscsi_conn_start(ddb_entry->conn); 913 ddb_entry->unblock_sess(ddb_entry->sess);
940 iscsi_conn_login_event(ddb_entry->conn,
941 ISCSI_CONN_STATE_LOGGED_IN);
942 qla4xxx_update_session_conn_param(ha, ddb_entry); 914 qla4xxx_update_session_conn_param(ha, ddb_entry);
943 status = QLA_SUCCESS; 915 status = QLA_SUCCESS;
944 break; 916 break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
954 __func__)); 926 __func__));
955 break; 927 break;
956 } 928 }
929 return status;
930}
931
932void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
933{
934 /*
935 * This triggers a relogin. After the relogin_timer
936 * expires, the relogin gets scheduled. We must wait a
937 * minimum amount of time since receiving an 0x8014 AEN
938 * with failed device_state or a logout response before
939 * we can issue another relogin.
940 *
 941 * Firmware pads this timeout by (time2wait + 1).
 942 * The driver's relogin retry interval must be longer
 943 * than the firmware's; otherwise the firmware fails the
 944 * set_ddb() mbx cmd with status 0x4005 because it is
 945 * still counting down its time2wait.
946 */
947 atomic_set(&ddb_entry->relogin_timer, 0);
948 atomic_set(&ddb_entry->retry_relogin_timer,
949 ddb_entry->default_time2wait + 4);
950
951}
952
953int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
954 struct ddb_entry *ddb_entry, uint32_t state)
955{
956 uint32_t old_fw_ddb_device_state;
957 int status = QLA_ERROR;
958
959 old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
960 DEBUG2(ql4_printk(KERN_INFO, ha,
961 "%s: DDB - old state = 0x%x, new state = 0x%x for "
962 "index [%d]\n", __func__,
963 ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
964
965 ddb_entry->fw_ddb_device_state = state;
966
967 switch (old_fw_ddb_device_state) {
968 case DDB_DS_LOGIN_IN_PROCESS:
969 case DDB_DS_NO_CONNECTION_ACTIVE:
970 switch (state) {
971 case DDB_DS_SESSION_ACTIVE:
972 ddb_entry->unblock_sess(ddb_entry->sess);
973 qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
974 status = QLA_SUCCESS;
975 break;
976 case DDB_DS_SESSION_FAILED:
977 iscsi_block_session(ddb_entry->sess);
978 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
979 qla4xxx_arm_relogin_timer(ddb_entry);
980 status = QLA_SUCCESS;
981 break;
982 }
983 break;
984 case DDB_DS_SESSION_ACTIVE:
985 switch (state) {
986 case DDB_DS_SESSION_FAILED:
987 iscsi_block_session(ddb_entry->sess);
988 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
989 qla4xxx_arm_relogin_timer(ddb_entry);
990 status = QLA_SUCCESS;
991 break;
992 }
993 break;
994 case DDB_DS_SESSION_FAILED:
995 switch (state) {
996 case DDB_DS_SESSION_ACTIVE:
997 ddb_entry->unblock_sess(ddb_entry->sess);
998 qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
999 status = QLA_SUCCESS;
1000 break;
1001 case DDB_DS_SESSION_FAILED:
1002 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
1003 qla4xxx_arm_relogin_timer(ddb_entry);
1004 status = QLA_SUCCESS;
1005 break;
1006 }
1007 break;
1008 default:
1009 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
1010 __func__));
1011 break;
1012 }
1013 return status;
1014}
1015
1016/**
1017 * qla4xxx_process_ddb_changed - process ddb state change
1018 * @ha - Pointer to host adapter structure.
1019 * @fw_ddb_index - Firmware's device database index
1020 * @state - Device state
1021 *
 1022 * This routine processes a Device Database Changed AEN Event.
1023 **/
1024int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1025 uint32_t fw_ddb_index,
1026 uint32_t state, uint32_t conn_err)
1027{
1028 struct ddb_entry *ddb_entry;
1029 int status = QLA_ERROR;
1030
1031 /* check for out of range index */
1032 if (fw_ddb_index >= MAX_DDB_ENTRIES)
1033 goto exit_ddb_event;
1034
 1035 /* Get the corresponding ddb entry */
1036 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
1037 /* Device does not currently exist in our database. */
1038 if (ddb_entry == NULL) {
1039 ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
1040 __func__, fw_ddb_index);
1041
1042 if (state == DDB_DS_NO_CONNECTION_ACTIVE)
1043 clear_bit(fw_ddb_index, ha->ddb_idx_map);
1044
1045 goto exit_ddb_event;
1046 }
1047
1048 ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
957 1049
958exit_ddb_event: 1050exit_ddb_event:
959 return status; 1051 return status;
960} 1052}
1053
1054/**
1055 * qla4xxx_login_flash_ddb - Login to target (DDB)
1056 * @cls_session: Pointer to the session to login
1057 *
1058 * This routine logins to the target.
1059 * Issues setddb and conn open mbx
1060 **/
1061void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
1062{
1063 struct iscsi_session *sess;
1064 struct ddb_entry *ddb_entry;
1065 struct scsi_qla_host *ha;
1066 struct dev_db_entry *fw_ddb_entry = NULL;
1067 dma_addr_t fw_ddb_dma;
1068 uint32_t mbx_sts = 0;
1069 int ret;
1070
1071 sess = cls_session->dd_data;
1072 ddb_entry = sess->dd_data;
1073 ha = ddb_entry->ha;
1074
1075 if (!test_bit(AF_LINK_UP, &ha->flags))
1076 return;
1077
1078 if (ddb_entry->ddb_type != FLASH_DDB) {
1079 DEBUG2(ql4_printk(KERN_INFO, ha,
1080 "Skipping login to non FLASH DB"));
1081 goto exit_login;
1082 }
1083
1084 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
1085 &fw_ddb_dma);
1086 if (fw_ddb_entry == NULL) {
1087 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
1088 goto exit_login;
1089 }
1090
1091 if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
1092 ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
1093 if (ret == QLA_ERROR)
1094 goto exit_login;
1095
1096 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1097 ha->tot_ddbs++;
1098 }
1099
1100 memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
1101 sizeof(struct dev_db_entry));
1102 ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
1103
1104 ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
1105 fw_ddb_dma, &mbx_sts);
1106 if (ret == QLA_ERROR) {
1107 DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
1108 goto exit_login;
1109 }
1110
1111 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1112 ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1113 if (ret == QLA_ERROR) {
1114 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1115 sess->targetname);
1116 goto exit_login;
1117 }
1118
1119exit_login:
1120 if (fw_ddb_entry)
1121 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
1122}
1123
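
The old-state/new-state handling in qla4xxx_flash_ddb_change() above is effectively a (old state, new state) -> action table: a transition to DDB_DS_SESSION_ACTIVE unblocks the session and refreshes its parameters, a transition to DDB_DS_SESSION_FAILED blocks it and/or arms the relogin timer (additionally gated on DF_RELOGIN in the driver), and anything else falls through to the "Unknown Event" default. The same mapping written as a data table, as a reading aid only, not a proposed change:

#include <stdio.h>

enum ddb_state { LOGIN_IN_PROCESS, NO_CONNECTION_ACTIVE,
		 SESSION_ACTIVE, SESSION_FAILED, NSTATES };
enum action    { IGNORE, UNBLOCK, BLOCK_AND_RELOGIN, RELOGIN };

/* action[old_state][new_state]; unlisted combinations default to IGNORE */
static const enum action flash_ddb_action[NSTATES][NSTATES] = {
	[LOGIN_IN_PROCESS]     = { [SESSION_ACTIVE] = UNBLOCK,
				   [SESSION_FAILED] = BLOCK_AND_RELOGIN },
	[NO_CONNECTION_ACTIVE] = { [SESSION_ACTIVE] = UNBLOCK,
				   [SESSION_FAILED] = BLOCK_AND_RELOGIN },
	[SESSION_ACTIVE]       = { [SESSION_FAILED] = BLOCK_AND_RELOGIN },
	[SESSION_FAILED]       = { [SESSION_ACTIVE] = UNBLOCK,
				   [SESSION_FAILED] = RELOGIN },
};

int main(void)
{
	printf("%d\n", flash_ddb_action[SESSION_ACTIVE][SESSION_FAILED]); /* 2 == BLOCK_AND_RELOGIN */
	return 0;
}
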
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b84870392..c2593782fbbe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
41 return status; 41 return status;
42 } 42 }
43 43
44 if (is_qla40XX(ha)) {
45 if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
46 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
47 "prematurely completing mbx cmd as "
48 "adapter removal detected\n",
49 ha->host_no, __func__));
50 return status;
51 }
52 }
53
44 if (is_qla8022(ha)) { 54 if (is_qla8022(ha)) {
45 if (test_bit(AF_FW_RECOVERY, &ha->flags)) { 55 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
46 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " 56 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
413 memcpy(ha->name_string, init_fw_cb->iscsi_name, 423 memcpy(ha->name_string, init_fw_cb->iscsi_name,
414 min(sizeof(ha->name_string), 424 min(sizeof(ha->name_string),
415 sizeof(init_fw_cb->iscsi_name))); 425 sizeof(init_fw_cb->iscsi_name)));
426 ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
416 /*memcpy(ha->alias, init_fw_cb->Alias, 427 /*memcpy(ha->alias, init_fw_cb->Alias,
417 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ 428 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
418 429
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f33..4169c8baa112 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/blkdev.h> 9#include <linux/blkdev.h>
10#include <linux/iscsi_boot_sysfs.h> 10#include <linux/iscsi_boot_sysfs.h>
11#include <linux/inet.h>
11 12
12#include <scsi/scsi_tcq.h> 13#include <scsi/scsi_tcq.h>
13#include <scsi/scsicam.h> 14#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
31/* 32/*
32 * Module parameter information and variables 33 * Module parameter information and variables
33 */ 34 */
35int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 "Set to disable exporting boot targets to sysfs\n"
39 " 0 - Export boot targets\n"
40 " 1 - Do not export boot targets (Default)");
41
34int ql4xdontresethba = 0; 42int ql4xdontresethba = 0;
35module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
36MODULE_PARM_DESC(ql4xdontresethba, 44MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 71module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64MODULE_PARM_DESC(ql4xsess_recovery_tmo, 72MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 "Target Session Recovery Timeout.\n" 73 "Target Session Recovery Timeout.\n"
66 " Default: 30 sec."); 74 " Default: 120 sec.");
67 75
68static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 76static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69/* 77/*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
415 qla_ep = ep->dd_data; 423 qla_ep = ep->dd_data;
416 ha = to_qla_host(qla_ep->host); 424 ha = to_qla_host(qla_ep->host);
417 425
418 if (adapter_up(ha)) 426 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
419 ret = 1; 427 ret = 1;
420 428
421 return ret; 429 return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
975 983
976} 984}
977 985
986int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
987{
988 uint32_t mbx_sts = 0;
989 uint16_t tmp_ddb_index;
990 int ret;
991
992get_ddb_index:
993 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
994
995 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
996 DEBUG2(ql4_printk(KERN_INFO, ha,
997 "Free DDB index not available\n"));
998 ret = QLA_ERROR;
999 goto exit_get_ddb_index;
1000 }
1001
1002 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1003 goto get_ddb_index;
1004
1005 DEBUG2(ql4_printk(KERN_INFO, ha,
1006 "Found a free DDB index at %d\n", tmp_ddb_index));
1007 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1008 if (ret == QLA_ERROR) {
1009 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1010 ql4_printk(KERN_INFO, ha,
1011 "DDB index = %d not available trying next\n",
1012 tmp_ddb_index);
1013 goto get_ddb_index;
1014 }
1015 DEBUG2(ql4_printk(KERN_INFO, ha,
1016 "Free FW DDB not available\n"));
1017 }
1018
1019 *ddb_index = tmp_ddb_index;
1020
1021exit_get_ddb_index:
1022 return ret;
1023}
1024
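
/*
 * Aside (illustration only, not part of this patch): qla4xxx_get_ddb_index()
 * above claims a free index from ha->ddb_idx_map with find_first_zero_bit()
 * plus test_and_set_bit(), retrying when another caller wins the race, and
 * only then asks the firmware to reserve that index.  A user-space sketch of
 * the local bitmap-claim step, with C11 atomics standing in for the kernel
 * bit ops (sketch_* names are hypothetical):
 */
#include <stdatomic.h>

#define SKETCH_MAX_IDX 64
static atomic_ulong sketch_idx_map;		/* bit N set == index N in use */

static int sketch_claim_index(void)
{
	for (;;) {
		unsigned long map = atomic_load(&sketch_idx_map);
		int idx = 0;

		while (idx < SKETCH_MAX_IDX && (map & (1UL << idx)))
			idx++;				/* find_first_zero_bit() */
		if (idx >= SKETCH_MAX_IDX)
			return -1;			/* map is full */
		/* test_and_set_bit(): retry if another caller set the bit first */
		if (!(atomic_fetch_or(&sketch_idx_map, 1UL << idx) & (1UL << idx)))
			return idx;
	}
}
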
1025static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1026 struct ddb_entry *ddb_entry,
1027 char *existing_ipaddr,
1028 char *user_ipaddr)
1029{
1030 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1031 char formatted_ipaddr[DDB_IPADDR_LEN];
1032 int status = QLA_SUCCESS, ret = 0;
1033
1034 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1035 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1036 '\0', NULL);
1037 if (ret == 0) {
1038 status = QLA_ERROR;
1039 goto out_match;
1040 }
1041 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1042 } else {
1043 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1044 '\0', NULL);
1045 if (ret == 0) {
1046 status = QLA_ERROR;
1047 goto out_match;
1048 }
1049 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1050 }
1051
1052 if (strcmp(existing_ipaddr, formatted_ipaddr))
1053 status = QLA_ERROR;
1054
1055out_match:
1056 return status;
1057}
1058
1059static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1060 struct iscsi_cls_conn *cls_conn)
1061{
1062 int idx = 0, max_ddbs, rval;
1063 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1064 struct iscsi_session *sess, *existing_sess;
1065 struct iscsi_conn *conn, *existing_conn;
1066 struct ddb_entry *ddb_entry;
1067
1068 sess = cls_sess->dd_data;
1069 conn = cls_conn->dd_data;
1070
1071 if (sess->targetname == NULL ||
1072 conn->persistent_address == NULL ||
1073 conn->persistent_port == 0)
1074 return QLA_ERROR;
1075
1076 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1077 MAX_DEV_DB_ENTRIES;
1078
1079 for (idx = 0; idx < max_ddbs; idx++) {
1080 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1081 if (ddb_entry == NULL)
1082 continue;
1083
1084 if (ddb_entry->ddb_type != FLASH_DDB)
1085 continue;
1086
1087 existing_sess = ddb_entry->sess->dd_data;
1088 existing_conn = ddb_entry->conn->dd_data;
1089
1090 if (existing_sess->targetname == NULL ||
1091 existing_conn->persistent_address == NULL ||
1092 existing_conn->persistent_port == 0)
1093 continue;
1094
1095 DEBUG2(ql4_printk(KERN_INFO, ha,
1096 "IQN = %s User IQN = %s\n",
1097 existing_sess->targetname,
1098 sess->targetname));
1099
1100 DEBUG2(ql4_printk(KERN_INFO, ha,
1101 "IP = %s User IP = %s\n",
1102 existing_conn->persistent_address,
1103 conn->persistent_address));
1104
1105 DEBUG2(ql4_printk(KERN_INFO, ha,
1106 "Port = %d User Port = %d\n",
1107 existing_conn->persistent_port,
1108 conn->persistent_port));
1109
1110 if (strcmp(existing_sess->targetname, sess->targetname))
1111 continue;
1112 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1113 existing_conn->persistent_address,
1114 conn->persistent_address);
1115 if (rval == QLA_ERROR)
1116 continue;
1117 if (existing_conn->persistent_port != conn->persistent_port)
1118 continue;
1119 break;
1120 }
1121
1122 if (idx == max_ddbs)
1123 return QLA_ERROR;
1124
1125 DEBUG2(ql4_printk(KERN_INFO, ha,
1126 "Match found in fwdb sessions\n"));
1127 return QLA_SUCCESS;
1128}
1129
978static struct iscsi_cls_session * 1130static struct iscsi_cls_session *
979qla4xxx_session_create(struct iscsi_endpoint *ep, 1131qla4xxx_session_create(struct iscsi_endpoint *ep,
980 uint16_t cmds_max, uint16_t qdepth, 1132 uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
984 struct scsi_qla_host *ha; 1136 struct scsi_qla_host *ha;
985 struct qla_endpoint *qla_ep; 1137 struct qla_endpoint *qla_ep;
986 struct ddb_entry *ddb_entry; 1138 struct ddb_entry *ddb_entry;
987 uint32_t ddb_index; 1139 uint16_t ddb_index;
988 uint32_t mbx_sts = 0;
989 struct iscsi_session *sess; 1140 struct iscsi_session *sess;
990 struct sockaddr *dst_addr; 1141 struct sockaddr *dst_addr;
991 int ret; 1142 int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
1000 dst_addr = (struct sockaddr *)&qla_ep->dst_addr; 1151 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1001 ha = to_qla_host(qla_ep->host); 1152 ha = to_qla_host(qla_ep->host);
1002 1153
1003get_ddb_index: 1154 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1004 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); 1155 if (ret == QLA_ERROR)
1005
1006 if (ddb_index >= MAX_DDB_ENTRIES) {
1007 DEBUG2(ql4_printk(KERN_INFO, ha,
1008 "Free DDB index not available\n"));
1009 return NULL;
1010 }
1011
1012 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1013 goto get_ddb_index;
1014
1015 DEBUG2(ql4_printk(KERN_INFO, ha,
1016 "Found a free DDB index at %d\n", ddb_index));
1017 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1018 if (ret == QLA_ERROR) {
1019 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1020 ql4_printk(KERN_INFO, ha,
1021 "DDB index = %d not available trying next\n",
1022 ddb_index);
1023 goto get_ddb_index;
1024 }
1025 DEBUG2(ql4_printk(KERN_INFO, ha,
1026 "Free FW DDB not available\n"));
1027 return NULL; 1156 return NULL;
1028 }
1029 1157
1030 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, 1158 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1031 cmds_max, sizeof(struct ddb_entry), 1159 cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
1040 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; 1168 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1041 ddb_entry->ha = ha; 1169 ddb_entry->ha = ha;
1042 ddb_entry->sess = cls_sess; 1170 ddb_entry->sess = cls_sess;
1171 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1172 ddb_entry->ddb_change = qla4xxx_ddb_change;
1043 cls_sess->recovery_tmo = ql4xsess_recovery_tmo; 1173 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1044 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; 1174 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1045 ha->tot_ddbs++; 1175 ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1077 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1207 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1078 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), 1208 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1079 conn_idx); 1209 conn_idx);
1210 if (!cls_conn)
1211 return NULL;
1212
1080 sess = cls_sess->dd_data; 1213 sess = cls_sess->dd_data;
1081 ddb_entry = sess->dd_data; 1214 ddb_entry = sess->dd_data;
1082 ddb_entry->conn = cls_conn; 1215 ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1109 struct iscsi_session *sess; 1242 struct iscsi_session *sess;
1110 struct ddb_entry *ddb_entry; 1243 struct ddb_entry *ddb_entry;
1111 struct scsi_qla_host *ha; 1244 struct scsi_qla_host *ha;
1112 struct dev_db_entry *fw_ddb_entry; 1245 struct dev_db_entry *fw_ddb_entry = NULL;
1113 dma_addr_t fw_ddb_entry_dma; 1246 dma_addr_t fw_ddb_entry_dma;
1114 uint32_t mbx_sts = 0; 1247 uint32_t mbx_sts = 0;
1115 int ret = 0; 1248 int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1120 ddb_entry = sess->dd_data; 1253 ddb_entry = sess->dd_data;
1121 ha = ddb_entry->ha; 1254 ha = ddb_entry->ha;
1122 1255
 1256 /* Check if we have a matching FW DDB; if so, do not log in
 1257 * to this target, since doing so could cause the target to
 1258 * log out the previous connection.
 1259 */
1260 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1261 if (ret == QLA_SUCCESS) {
1262 ql4_printk(KERN_INFO, ha,
1263 "Session already exist in FW.\n");
1264 ret = -EEXIST;
1265 goto exit_conn_start;
1266 }
1267
1123 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 1268 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1124 &fw_ddb_entry_dma, GFP_KERNEL); 1269 &fw_ddb_entry_dma, GFP_KERNEL);
1125 if (!fw_ddb_entry) { 1270 if (!fw_ddb_entry) {
1126 ql4_printk(KERN_ERR, ha, 1271 ql4_printk(KERN_ERR, ha,
1127 "%s: Unable to allocate dma buffer\n", __func__); 1272 "%s: Unable to allocate dma buffer\n", __func__);
1128 return -ENOMEM; 1273 ret = -ENOMEM;
1274 goto exit_conn_start;
1129 } 1275 }
1130 1276
1131 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); 1277 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1138 if (mbx_sts) 1284 if (mbx_sts)
1139 if (ddb_entry->fw_ddb_device_state == 1285 if (ddb_entry->fw_ddb_device_state ==
1140 DDB_DS_SESSION_ACTIVE) { 1286 DDB_DS_SESSION_ACTIVE) {
1141 iscsi_conn_start(ddb_entry->conn); 1287 ddb_entry->unblock_sess(ddb_entry->sess);
1142 iscsi_conn_login_event(ddb_entry->conn,
1143 ISCSI_CONN_STATE_LOGGED_IN);
1144 goto exit_set_param; 1288 goto exit_set_param;
1145 } 1289 }
1146 1290
@@ -1167,8 +1311,9 @@ exit_set_param:
1167 ret = 0; 1311 ret = 0;
1168 1312
1169exit_conn_start: 1313exit_conn_start:
1170 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 1314 if (fw_ddb_entry)
1171 fw_ddb_entry, fw_ddb_entry_dma); 1315 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1316 fw_ddb_entry, fw_ddb_entry_dma);
1172 return ret; 1317 return ret;
1173} 1318}
1174 1319
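The conn_start changes above switch the function to a single exit: fw_ddb_entry starts out NULL, the early return becomes ret = -ENOMEM plus a jump to exit_conn_start, and the DMA buffer is freed only when it was actually allocated. A small sketch of that goto-cleanup shape, with malloc/free standing in for dma_alloc_coherent/dma_free_coherent and a stub for the session-exists check:

#include <stdlib.h>
#include <errno.h>
#include <stdio.h>

static int already_exists(void)		/* stand-in for qla4xxx_match_fwdb_session() */
{
	return 0;
}

static int do_start(void)
{
	char *buf = NULL;		/* like fw_ddb_entry = NULL */
	int ret = 0;

	if (already_exists()) {
		ret = -EEXIST;
		goto out;		/* buf is still NULL here */
	}

	buf = malloc(512);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... program the entry, issue the login ... */

out:
	if (buf)
		free(buf);		/* freed only if it was allocated */
	return ret;
}

int main(void)
{
	printf("do_start() = %d\n", do_start());
	return 0;
}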
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
1344 return -ENOSYS; 1489 return -ENOSYS;
1345} 1490}
1346 1491
1492static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1493 struct dev_db_entry *fw_ddb_entry,
1494 struct iscsi_cls_session *cls_sess,
1495 struct iscsi_cls_conn *cls_conn)
1496{
1497 int buflen = 0;
1498 struct iscsi_session *sess;
1499 struct iscsi_conn *conn;
1500 char ip_addr[DDB_IPADDR_LEN];
1501 uint16_t options = 0;
1502
1503 sess = cls_sess->dd_data;
1504 conn = cls_conn->dd_data;
1505
1506 conn->max_recv_dlength = BYTE_UNITS *
1507 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1508
1509 conn->max_xmit_dlength = BYTE_UNITS *
1510 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1511
1512 sess->initial_r2t_en =
1513 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1514
1515 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1516
1517 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1518
1519 sess->first_burst = BYTE_UNITS *
1520 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1521
1522 sess->max_burst = BYTE_UNITS *
1523 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1524
1525 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1526
1527 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1528
1529 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1530
1531 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1532
1533 options = le16_to_cpu(fw_ddb_entry->options);
1534 if (options & DDB_OPT_IPV6_DEVICE)
1535 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1536 else
1537 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1538
1539 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1540 (char *)fw_ddb_entry->iscsi_name, buflen);
1541 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1542 (char *)ha->name_string, buflen);
1543 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1544 (char *)ip_addr, buflen);
1545}
1546
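qla4xxx_copy_fwddb_param() converts little-endian firmware DDB fields into iSCSI session/connection parameters and formats the portal address with the kernel's %pI4/%pI6 printk extensions. A rough userspace equivalent of just the address formatting, using inet_ntop() instead of %pI4/%pI6 (OPT_IPV6 and the field layout here are illustrative, not the real dev_db_entry):

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define OPT_IPV6 0x0100		/* hypothetical stand-in for DDB_OPT_IPV6_DEVICE */

static void format_portal(const unsigned char *raw_ip, unsigned int opts,
			  char *out, socklen_t outlen)
{
	if (opts & OPT_IPV6)
		inet_ntop(AF_INET6, raw_ip, out, outlen);
	else
		inet_ntop(AF_INET, raw_ip, out, outlen);
}

int main(void)
{
	unsigned char v4[16] = { 192, 168, 1, 20 };	/* firmware stores raw bytes */
	char addr[INET6_ADDRSTRLEN];

	format_portal(v4, 0, addr, sizeof(addr));
	printf("portal = %s:%d\n", addr, 3260);
	return 0;
}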
1547void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1548 struct ddb_entry *ddb_entry)
1549{
1550 struct iscsi_cls_session *cls_sess;
1551 struct iscsi_cls_conn *cls_conn;
1552 uint32_t ddb_state;
1553 dma_addr_t fw_ddb_entry_dma;
1554 struct dev_db_entry *fw_ddb_entry;
1555
1556 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1557 &fw_ddb_entry_dma, GFP_KERNEL);
1558 if (!fw_ddb_entry) {
1559 ql4_printk(KERN_ERR, ha,
1560 "%s: Unable to allocate dma buffer\n", __func__);
1561 goto exit_session_conn_fwddb_param;
1562 }
1563
1564 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1565 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1566 NULL, NULL, NULL) == QLA_ERROR) {
1567 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1568 "get_ddb_entry for fw_ddb_index %d\n",
1569 ha->host_no, __func__,
1570 ddb_entry->fw_ddb_index));
1571 goto exit_session_conn_fwddb_param;
1572 }
1573
1574 cls_sess = ddb_entry->sess;
1575
1576 cls_conn = ddb_entry->conn;
1577
1578 /* Update params */
1579 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
1580
1581exit_session_conn_fwddb_param:
1582 if (fw_ddb_entry)
1583 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1584 fw_ddb_entry, fw_ddb_entry_dma);
1585}
1586
1347void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, 1587void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1348 struct ddb_entry *ddb_entry) 1588 struct ddb_entry *ddb_entry)
1349{ 1589{
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1360 if (!fw_ddb_entry) { 1600 if (!fw_ddb_entry) {
1361 ql4_printk(KERN_ERR, ha, 1601 ql4_printk(KERN_ERR, ha,
1362 "%s: Unable to allocate dma buffer\n", __func__); 1602 "%s: Unable to allocate dma buffer\n", __func__);
1363 return; 1603 goto exit_session_conn_param;
1364 } 1604 }
1365 1605
1366 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, 1606 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1370 "get_ddb_entry for fw_ddb_index %d\n", 1610 "get_ddb_entry for fw_ddb_index %d\n",
1371 ha->host_no, __func__, 1611 ha->host_no, __func__,
1372 ddb_entry->fw_ddb_index)); 1612 ddb_entry->fw_ddb_index));
1373 return; 1613 goto exit_session_conn_param;
1374 } 1614 }
1375 1615
1376 cls_sess = ddb_entry->sess; 1616 cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1379 cls_conn = ddb_entry->conn; 1619 cls_conn = ddb_entry->conn;
1380 conn = cls_conn->dd_data; 1620 conn = cls_conn->dd_data;
1381 1621
1622 /* Update timers after login */
1623 ddb_entry->default_relogin_timeout =
1624 le16_to_cpu(fw_ddb_entry->def_timeout);
1625 ddb_entry->default_time2wait =
1626 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1627
1382 /* Update params */ 1628 /* Update params */
1383 conn->max_recv_dlength = BYTE_UNITS * 1629 conn->max_recv_dlength = BYTE_UNITS *
1384 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 1630 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1407 1653
1408 memcpy(sess->initiatorname, ha->name_string, 1654 memcpy(sess->initiatorname, ha->name_string,
1409 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 1655 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1656
1657exit_session_conn_param:
1658 if (fw_ddb_entry)
1659 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1660 fw_ddb_entry, fw_ddb_entry_dma);
1410} 1661}
1411 1662
1412/* 1663/*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1607 vfree(ha->chap_list); 1858 vfree(ha->chap_list);
1608 ha->chap_list = NULL; 1859 ha->chap_list = NULL;
1609 1860
1861 if (ha->fw_ddb_dma_pool)
1862 dma_pool_destroy(ha->fw_ddb_dma_pool);
1863
1610 /* release io space registers */ 1864 /* release io space registers */
1611 if (is_qla8022(ha)) { 1865 if (is_qla8022(ha)) {
1612 if (ha->nx_pcibase) 1866 if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1689 goto mem_alloc_error_exit; 1943 goto mem_alloc_error_exit;
1690 } 1944 }
1691 1945
1946 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
1947 DDB_DMA_BLOCK_SIZE, 8, 0);
1948
1949 if (ha->fw_ddb_dma_pool == NULL) {
1950 ql4_printk(KERN_WARNING, ha,
1951 "%s: fw_ddb_dma_pool allocation failed..\n",
1952 __func__);
1953 goto mem_alloc_error_exit;
1954 }
1955
1692 return QLA_SUCCESS; 1956 return QLA_SUCCESS;
1693 1957
1694mem_alloc_error_exit: 1958mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1800 } 2064 }
1801} 2065}
1802 2066
2067void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2068{
2069 struct iscsi_session *sess;
2070 struct ddb_entry *ddb_entry;
2071 struct scsi_qla_host *ha;
2072
2073 sess = cls_sess->dd_data;
2074 ddb_entry = sess->dd_data;
2075 ha = ddb_entry->ha;
2076
2077 if (!(ddb_entry->ddb_type == FLASH_DDB))
2078 return;
2079
2080 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2081 !iscsi_is_session_online(cls_sess)) {
2082 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2083 INVALID_ENTRY) {
2084 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2085 0) {
2086 atomic_set(&ddb_entry->retry_relogin_timer,
2087 INVALID_ENTRY);
2088 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2089 set_bit(DF_RELOGIN, &ddb_entry->flags);
2090 DEBUG2(ql4_printk(KERN_INFO, ha,
2091 "%s: index [%d] login device\n",
2092 __func__, ddb_entry->fw_ddb_index));
2093 } else
2094 atomic_dec(&ddb_entry->retry_relogin_timer);
2095 }
2096 }
2097
 2098 /* Wait for relogin to time out */
2099 if (atomic_read(&ddb_entry->relogin_timer) &&
2100 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2101 /*
2102 * If the relogin times out and the device is
2103 * still NOT ONLINE then try and relogin again.
2104 */
2105 if (!iscsi_is_session_online(cls_sess)) {
2106 /* Reset retry relogin timer */
2107 atomic_inc(&ddb_entry->relogin_retry_count);
2108 DEBUG2(ql4_printk(KERN_INFO, ha,
2109 "%s: index[%d] relogin timed out-retrying"
2110 " relogin (%d), retry (%d)\n", __func__,
2111 ddb_entry->fw_ddb_index,
2112 atomic_read(&ddb_entry->relogin_retry_count),
2113 ddb_entry->default_time2wait + 4));
2114 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2115 atomic_set(&ddb_entry->retry_relogin_timer,
2116 ddb_entry->default_time2wait + 4);
2117 }
2118 }
2119}
2120
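qla4xxx_check_relogin_flash_ddb() is driven once per second from qla4xxx_timer(): retry_relogin_timer counts down to zero, is then parked at INVALID_ENTRY, and the DPC relogin flag is raised; relogin_timer separately times out an in-flight login attempt. A compressed single-threaded sketch of the countdown half, with plain ints in place of atomics and a printf in place of the DPC flag:

#include <stdio.h>

#define INVALID_ENTRY -1

struct ddb {
	int retry_relogin_timer;	/* seconds until next login attempt */
	int online;
};

/* Called once per simulated second, like the qla4xxx_timer() hook. */
static void tick(struct ddb *d)
{
	if (d->online || d->retry_relogin_timer == INVALID_ENTRY)
		return;

	if (d->retry_relogin_timer == 0) {
		d->retry_relogin_timer = INVALID_ENTRY;	/* park until re-armed */
		printf("schedule relogin\n");		/* i.e. set the DPC flag */
	} else {
		d->retry_relogin_timer--;
	}
}

int main(void)
{
	struct ddb d = { .retry_relogin_timer = 3, .online = 0 };
	int sec;

	for (sec = 0; sec < 5; sec++)
		tick(&d);
	return 0;
}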
1803/** 2121/**
1804 * qla4xxx_timer - checks every second for work to do. 2122 * qla4xxx_timer - checks every second for work to do.
1805 * @ha: Pointer to host adapter structure. 2123 * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
1809 int start_dpc = 0; 2127 int start_dpc = 0;
1810 uint16_t w; 2128 uint16_t w;
1811 2129
2130 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2131
1812 /* If we are in the middle of AER/EEH processing 2132 /* If we are in the middle of AER/EEH processing
1813 * skip any processing and reschedule the timer 2133 * skip any processing and reschedule the timer
1814 */ 2134 */
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2078 sess = cls_session->dd_data; 2398 sess = cls_session->dd_data;
2079 ddb_entry = sess->dd_data; 2399 ddb_entry = sess->dd_data;
2080 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; 2400 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2081 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); 2401
2402 if (ddb_entry->ddb_type == FLASH_DDB)
2403 iscsi_block_session(ddb_entry->sess);
2404 else
2405 iscsi_session_failure(cls_session->dd_data,
2406 ISCSI_ERR_CONN_FAILED);
2082} 2407}
2083 2408
2084/** 2409/**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
2163 2488
2164 /* NOTE: AF_ONLINE flag set upon successful completion of 2489 /* NOTE: AF_ONLINE flag set upon successful completion of
2165 * qla4xxx_initialize_adapter */ 2490 * qla4xxx_initialize_adapter */
2166 status = qla4xxx_initialize_adapter(ha); 2491 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
2167 } 2492 }
2168 2493
2169 /* Retry failed adapter initialization, if necessary 2494 /* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2245 iscsi_unblock_session(ddb_entry->sess); 2570 iscsi_unblock_session(ddb_entry->sess);
2246 } else { 2571 } else {
2247 /* Trigger relogin */ 2572 /* Trigger relogin */
2248 iscsi_session_failure(cls_session->dd_data, 2573 if (ddb_entry->ddb_type == FLASH_DDB) {
2249 ISCSI_ERR_CONN_FAILED); 2574 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
2575 qla4xxx_arm_relogin_timer(ddb_entry);
2576 } else
2577 iscsi_session_failure(cls_session->dd_data,
2578 ISCSI_ERR_CONN_FAILED);
2250 } 2579 }
2251 } 2580 }
2252} 2581}
2253 2582
2583int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
2584{
2585 struct iscsi_session *sess;
2586 struct ddb_entry *ddb_entry;
2587 struct scsi_qla_host *ha;
2588
2589 sess = cls_session->dd_data;
2590 ddb_entry = sess->dd_data;
2591 ha = ddb_entry->ha;
2592 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2593 " unblock session\n", ha->host_no, __func__,
2594 ddb_entry->fw_ddb_index);
2595
2596 iscsi_unblock_session(ddb_entry->sess);
2597
2598 /* Start scan target */
2599 if (test_bit(AF_ONLINE, &ha->flags)) {
2600 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2601 " start scan\n", ha->host_no, __func__,
2602 ddb_entry->fw_ddb_index);
2603 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
2604 }
2605 return QLA_SUCCESS;
2606}
2607
2608int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
2609{
2610 struct iscsi_session *sess;
2611 struct ddb_entry *ddb_entry;
2612 struct scsi_qla_host *ha;
2613
2614 sess = cls_session->dd_data;
2615 ddb_entry = sess->dd_data;
2616 ha = ddb_entry->ha;
2617 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2618 " unblock user space session\n", ha->host_no, __func__,
2619 ddb_entry->fw_ddb_index);
2620 iscsi_conn_start(ddb_entry->conn);
2621 iscsi_conn_login_event(ddb_entry->conn,
2622 ISCSI_CONN_STATE_LOGGED_IN);
2623
2624 return QLA_SUCCESS;
2625}
2626
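User-created sessions keep the old iscsi_conn_start()/login-event path while flash sessions need iscsi_unblock_session() plus a target scan, so the patch dispatches through the ddb_entry->unblock_sess function pointer chosen when the ddb is set up. A stripped-down sketch of that dispatch; the struct and function names are illustrative only:

#include <stdio.h>

struct ddb;
typedef int (*unblock_fn)(struct ddb *);

struct ddb {
	const char *kind;
	unblock_fn unblock_sess;	/* chosen when the ddb is created */
};

static int unblock_user_session(struct ddb *d)
{
	printf("%s: start conn, post LOGGED_IN event\n", d->kind);
	return 0;
}

static int unblock_flash_session(struct ddb *d)
{
	printf("%s: unblock session, queue target scan\n", d->kind);
	return 0;
}

int main(void)
{
	struct ddb user  = { "user",  unblock_user_session  };
	struct ddb flash = { "flash", unblock_flash_session };

	user.unblock_sess(&user);	/* like ddb_entry->unblock_sess(ddb_entry->sess) */
	flash.unblock_sess(&flash);
	return 0;
}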
2254static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 2627static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2255{ 2628{
2256 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); 2629 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2257} 2630}
2258 2631
2632static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2633{
2634 uint16_t relogin_timer;
2635 struct iscsi_session *sess;
2636 struct ddb_entry *ddb_entry;
2637 struct scsi_qla_host *ha;
2638
2639 sess = cls_sess->dd_data;
2640 ddb_entry = sess->dd_data;
2641 ha = ddb_entry->ha;
2642
2643 relogin_timer = max(ddb_entry->default_relogin_timeout,
2644 (uint16_t)RELOGIN_TOV);
2645 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
2646
2647 DEBUG2(ql4_printk(KERN_INFO, ha,
2648 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
2649 ddb_entry->fw_ddb_index, relogin_timer));
2650
2651 qla4xxx_login_flash_ddb(cls_sess);
2652}
2653
2654static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
2655{
2656 struct iscsi_session *sess;
2657 struct ddb_entry *ddb_entry;
2658 struct scsi_qla_host *ha;
2659
2660 sess = cls_sess->dd_data;
2661 ddb_entry = sess->dd_data;
2662 ha = ddb_entry->ha;
2663
2664 if (!(ddb_entry->ddb_type == FLASH_DDB))
2665 return;
2666
2667 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
2668 !iscsi_is_session_online(cls_sess)) {
2669 DEBUG2(ql4_printk(KERN_INFO, ha,
2670 "relogin issued\n"));
2671 qla4xxx_relogin_flash_ddb(cls_sess);
2672 }
2673}
2674
2259void qla4xxx_wake_dpc(struct scsi_qla_host *ha) 2675void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2260{ 2676{
2261 if (ha->dpc_thread) 2677 if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
2356 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) 2772 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2357 qla4xxx_get_dhcp_ip_address(ha); 2773 qla4xxx_get_dhcp_ip_address(ha);
2358 2774
2775 /* ---- relogin device? --- */
2776 if (adapter_up(ha) &&
2777 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
2778 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
2779 }
2780
2359 /* ---- link change? --- */ 2781 /* ---- link change? --- */
2360 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) { 2782 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2361 if (!test_bit(AF_LINK_UP, &ha->flags)) { 2783 if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
2368 * fatal error recovery. Therefore, the driver must 2790 * fatal error recovery. Therefore, the driver must
2369 * manually relogin to devices when recovering from 2791 * manually relogin to devices when recovering from
2370 * connection failures, logouts, expired KATO, etc. */ 2792 * connection failures, logouts, expired KATO, etc. */
2371 2793 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
2372 qla4xxx_relogin_all_devices(ha); 2794 qla4xxx_build_ddb_list(ha, ha->is_reset);
2795 iscsi_host_for_each_session(ha->host,
2796 qla4xxx_login_flash_ddb);
2797 } else
2798 qla4xxx_relogin_all_devices(ha);
2373 } 2799 }
2374 } 2800 }
2375} 2801}
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2867 " target ID %d\n", __func__, ddb_index[0], 3293 " target ID %d\n", __func__, ddb_index[0],
2868 ddb_index[1])); 3294 ddb_index[1]));
2869 3295
3296 ha->pri_ddb_idx = ddb_index[0];
3297 ha->sec_ddb_idx = ddb_index[1];
3298
2870exit_boot_info_free: 3299exit_boot_info_free:
2871 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 3300 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2872exit_boot_info: 3301exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3034 return ret; 3463 return ret;
3035 } 3464 }
3036 3465
3466 if (ql4xdisablesysfsboot)
3467 return QLA_SUCCESS;
3468
3037 if (ddb_index[0] == 0xffff) 3469 if (ddb_index[0] == 0xffff)
3038 goto sec_target; 3470 goto sec_target;
3039 3471
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3066 struct iscsi_boot_kobj *boot_kobj; 3498 struct iscsi_boot_kobj *boot_kobj;
3067 3499
3068 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) 3500 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3069 return 0; 3501 return QLA_ERROR;
3502
3503 if (ql4xdisablesysfsboot) {
3504 ql4_printk(KERN_INFO, ha,
3505 "%s: syfsboot disabled - driver will trigger login"
3506 "and publish session for discovery .\n", __func__);
3507 return QLA_SUCCESS;
3508 }
3509
3070 3510
3071 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); 3511 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3072 if (!ha->boot_kset) 3512 if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3108 if (!boot_kobj) 3548 if (!boot_kobj)
3109 goto put_host; 3549 goto put_host;
3110 3550
3111 return 0; 3551 return QLA_SUCCESS;
3112 3552
3113put_host: 3553put_host:
3114 scsi_host_put(ha->host); 3554 scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3174exit_chap_list: 3614exit_chap_list:
3175 dma_free_coherent(&ha->pdev->dev, chap_size, 3615 dma_free_coherent(&ha->pdev->dev, chap_size,
3176 chap_flash_data, chap_dma); 3616 chap_flash_data, chap_dma);
3177 return;
3178} 3617}
3179 3618
3619static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
3620 struct ql4_tuple_ddb *tddb)
3621{
3622 struct scsi_qla_host *ha;
3623 struct iscsi_cls_session *cls_sess;
3624 struct iscsi_cls_conn *cls_conn;
3625 struct iscsi_session *sess;
3626 struct iscsi_conn *conn;
3627
3628 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
3629 ha = ddb_entry->ha;
3630 cls_sess = ddb_entry->sess;
3631 sess = cls_sess->dd_data;
3632 cls_conn = ddb_entry->conn;
3633 conn = cls_conn->dd_data;
3634
3635 tddb->tpgt = sess->tpgt;
3636 tddb->port = conn->persistent_port;
3637 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
3638 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
3639}
3640
3641static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
3642 struct ql4_tuple_ddb *tddb)
3643{
3644 uint16_t options = 0;
3645
3646 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3647 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
3648 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
3649
3650 options = le16_to_cpu(fw_ddb_entry->options);
3651 if (options & DDB_OPT_IPV6_DEVICE)
3652 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
3653 else
3654 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
3655
3656 tddb->port = le16_to_cpu(fw_ddb_entry->port);
3657}
3658
3659static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
3660 struct ql4_tuple_ddb *old_tddb,
3661 struct ql4_tuple_ddb *new_tddb)
3662{
3663 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
3664 return QLA_ERROR;
3665
3666 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
3667 return QLA_ERROR;
3668
3669 if (old_tddb->port != new_tddb->port)
3670 return QLA_ERROR;
3671
3672 DEBUG2(ql4_printk(KERN_INFO, ha,
3673 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
3674 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
3675 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
3676 new_tddb->ip_addr, new_tddb->iscsi_name));
3677
3678 return QLA_SUCCESS;
3679}
3680
3681static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
3682 struct dev_db_entry *fw_ddb_entry)
3683{
3684 struct ddb_entry *ddb_entry;
3685 struct ql4_tuple_ddb *fw_tddb = NULL;
3686 struct ql4_tuple_ddb *tmp_tddb = NULL;
3687 int idx;
3688 int ret = QLA_ERROR;
3689
3690 fw_tddb = vzalloc(sizeof(*fw_tddb));
3691 if (!fw_tddb) {
3692 DEBUG2(ql4_printk(KERN_WARNING, ha,
3693 "Memory Allocation failed.\n"));
3694 ret = QLA_SUCCESS;
3695 goto exit_check;
3696 }
3697
3698 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
3699 if (!tmp_tddb) {
3700 DEBUG2(ql4_printk(KERN_WARNING, ha,
3701 "Memory Allocation failed.\n"));
3702 ret = QLA_SUCCESS;
3703 goto exit_check;
3704 }
3705
3706 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
3707
3708 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
3709 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
3710 if (ddb_entry == NULL)
3711 continue;
3712
3713 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
3714 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
3715 ret = QLA_SUCCESS; /* found */
3716 goto exit_check;
3717 }
3718 }
3719
3720exit_check:
3721 if (fw_tddb)
3722 vfree(fw_tddb);
3723 if (tmp_tddb)
3724 vfree(tmp_tddb);
3725 return ret;
3726}
3727
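qla4xxx_is_session_exists() normalizes both the firmware record and each live session into a ql4_tuple_ddb (name, address string, port) before comparing, so the two differently shaped sources can share one comparison routine. A small standalone sketch of that normalize-then-compare pattern with invented record layouts:

#include <stdio.h>
#include <string.h>

/* Common shape both sides are reduced to, like struct ql4_tuple_ddb. */
struct tuple {
	char name[64];
	char ip[64];
	unsigned short port;
};

/* Hypothetical "firmware" record with raw fields. */
struct fw_rec {
	char iscsi_name[64];
	unsigned char ip[4];
	unsigned short port;
};

static void fw_to_tuple(const struct fw_rec *fw, struct tuple *t)
{
	snprintf(t->name, sizeof(t->name), "%s", fw->iscsi_name);
	snprintf(t->ip, sizeof(t->ip), "%u.%u.%u.%u",
		 fw->ip[0], fw->ip[1], fw->ip[2], fw->ip[3]);
	t->port = fw->port;
}

static int tuples_equal(const struct tuple *a, const struct tuple *b)
{
	return !strcmp(a->name, b->name) &&
	       !strcmp(a->ip, b->ip) &&
	       a->port == b->port;
}

int main(void)
{
	struct fw_rec fw = { "iqn.2001-04.com.example:t1", { 10, 0, 0, 5 }, 3260 };
	struct tuple a, b = { "iqn.2001-04.com.example:t1", "10.0.0.5", 3260 };

	fw_to_tuple(&fw, &a);
	printf("%s\n", tuples_equal(&a, &b) ? "session exists" : "new session");
	return 0;
}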
3728static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
3729 struct list_head *list_nt,
3730 struct dev_db_entry *fw_ddb_entry)
3731{
3732 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
3733 struct ql4_tuple_ddb *fw_tddb = NULL;
3734 struct ql4_tuple_ddb *tmp_tddb = NULL;
3735 int ret = QLA_ERROR;
3736
3737 fw_tddb = vzalloc(sizeof(*fw_tddb));
3738 if (!fw_tddb) {
3739 DEBUG2(ql4_printk(KERN_WARNING, ha,
3740 "Memory Allocation failed.\n"));
3741 ret = QLA_SUCCESS;
3742 goto exit_check;
3743 }
3744
3745 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
3746 if (!tmp_tddb) {
3747 DEBUG2(ql4_printk(KERN_WARNING, ha,
3748 "Memory Allocation failed.\n"));
3749 ret = QLA_SUCCESS;
3750 goto exit_check;
3751 }
3752
3753 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
3754
3755 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
3756 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
3757 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
3758 ret = QLA_SUCCESS; /* found */
3759 goto exit_check;
3760 }
3761 }
3762
3763exit_check:
3764 if (fw_tddb)
3765 vfree(fw_tddb);
3766 if (tmp_tddb)
3767 vfree(tmp_tddb);
3768 return ret;
3769}
3770
3771static void qla4xxx_free_nt_list(struct list_head *list_nt)
3772{
3773 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
3774
3775 /* Free up the normaltargets list */
3776 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
3777 list_del_init(&nt_ddb_idx->list);
3778 vfree(nt_ddb_idx);
3779 }
3780
3781}
3782
3783static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
3784 struct dev_db_entry *fw_ddb_entry)
3785{
3786 struct iscsi_endpoint *ep;
3787 struct sockaddr_in *addr;
3788 struct sockaddr_in6 *addr6;
3789 struct sockaddr *dst_addr;
3790 char *ip;
3791
 3792 /* TODO: the iscsi_endpoint needs to be destroyed on unload */
3793 dst_addr = vmalloc(sizeof(*dst_addr));
3794 if (!dst_addr)
3795 return NULL;
3796
3797 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
3798 dst_addr->sa_family = AF_INET6;
3799 addr6 = (struct sockaddr_in6 *)dst_addr;
3800 ip = (char *)&addr6->sin6_addr;
3801 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
3802 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
3803
3804 } else {
3805 dst_addr->sa_family = AF_INET;
3806 addr = (struct sockaddr_in *)dst_addr;
3807 ip = (char *)&addr->sin_addr;
3808 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
3809 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
3810 }
3811
3812 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
3813 vfree(dst_addr);
3814 return ep;
3815}
3816
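qla4xxx_get_ep_fwdb() rebuilds a struct sockaddr from the raw address bytes and little-endian port stored in the flash DDB and hands it to qla4xxx_ep_connect() so the endpoint is visible in sysfs. A userspace sketch of the same sockaddr packing for both address families (the helper name is made up):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void fill_dst_addr(struct sockaddr_storage *dst,
			  const unsigned char *raw_ip, unsigned short port,
			  int ipv6)
{
	memset(dst, 0, sizeof(*dst));
	if (ipv6) {
		struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)dst;

		a6->sin6_family = AF_INET6;
		memcpy(&a6->sin6_addr, raw_ip, 16);
		a6->sin6_port = htons(port);
	} else {
		struct sockaddr_in *a4 = (struct sockaddr_in *)dst;

		a4->sin_family = AF_INET;
		memcpy(&a4->sin_addr, raw_ip, 4);
		a4->sin_port = htons(port);
	}
}

int main(void)
{
	unsigned char ip[4] = { 10, 1, 2, 3 };
	struct sockaddr_storage dst;
	char buf[INET_ADDRSTRLEN];

	fill_dst_addr(&dst, ip, 3260, 0);
	inet_ntop(AF_INET, &((struct sockaddr_in *)&dst)->sin_addr,
		  buf, sizeof(buf));
	printf("%s:%u\n", buf, ntohs(((struct sockaddr_in *)&dst)->sin_port));
	return 0;
}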
3817static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
3818{
3819 if (ql4xdisablesysfsboot)
3820 return QLA_SUCCESS;
3821 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
3822 return QLA_ERROR;
3823 return QLA_SUCCESS;
3824}
3825
3826static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
3827 struct ddb_entry *ddb_entry)
3828{
3829 ddb_entry->ddb_type = FLASH_DDB;
3830 ddb_entry->fw_ddb_index = INVALID_ENTRY;
3831 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
3832 ddb_entry->ha = ha;
3833 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
3834 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
3835
3836 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
3837 atomic_set(&ddb_entry->relogin_timer, 0);
3838 atomic_set(&ddb_entry->relogin_retry_count, 0);
3839
3840 ddb_entry->default_relogin_timeout =
3841 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
3842 ddb_entry->default_time2wait =
3843 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
3844}
3845
3846static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
3847{
3848 uint32_t idx = 0;
3849 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
3850 uint32_t sts[MBOX_REG_COUNT];
3851 uint32_t ip_state;
3852 unsigned long wtime;
3853 int ret;
3854
3855 wtime = jiffies + (HZ * IP_CONFIG_TOV);
3856 do {
3857 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
3858 if (ip_idx[idx] == -1)
3859 continue;
3860
3861 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
3862
3863 if (ret == QLA_ERROR) {
3864 ip_idx[idx] = -1;
3865 continue;
3866 }
3867
3868 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
3869
3870 DEBUG2(ql4_printk(KERN_INFO, ha,
3871 "Waiting for IP state for idx = %d, state = 0x%x\n",
3872 ip_idx[idx], ip_state));
3873 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
3874 ip_state == IP_ADDRSTATE_INVALID ||
3875 ip_state == IP_ADDRSTATE_PREFERRED ||
3876 ip_state == IP_ADDRSTATE_DEPRICATED ||
3877 ip_state == IP_ADDRSTATE_DISABLING)
3878 ip_idx[idx] = -1;
3879
3880 }
3881
3882 /* Break if all IP states checked */
3883 if ((ip_idx[0] == -1) &&
3884 (ip_idx[1] == -1) &&
3885 (ip_idx[2] == -1) &&
3886 (ip_idx[3] == -1))
3887 break;
3888 schedule_timeout_uninterruptible(HZ);
3889 } while (time_after(wtime, jiffies));
3890}
3891
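qla4xxx_wait_for_ip_configuration() polls the four interface address states once a second, marking finished interfaces with -1, until everything has settled or IP_CONFIG_TOV expires. A small model of that deadline/poll loop using time() and sleep() in place of jiffies and schedule_timeout_uninterruptible(); the "settled" test is faked:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define NR_IF 4
#define TOV   5		/* seconds, stand-in for IP_CONFIG_TOV */

/* Pretend interface i settles after i+1 polls. */
static int ip_settled(int i, int polls)
{
	return polls > i;
}

int main(void)
{
	int pending[NR_IF] = { 0, 1, 2, 3 };	/* set to -1 once settled */
	time_t deadline = time(NULL) + TOV;
	int polls = 0, i, left;

	do {
		left = 0;
		for (i = 0; i < NR_IF; i++) {
			if (pending[i] == -1)
				continue;
			if (ip_settled(i, polls))
				pending[i] = -1;
			else
				left++;
		}
		if (!left)
			break;			/* all interfaces settled */
		sleep(1);
		polls++;
	} while (time(NULL) < deadline);

	printf("done, %d interface(s) still unsettled\n", left);
	return 0;
}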
3892void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
3893{
3894 int max_ddbs;
3895 int ret;
3896 uint32_t idx = 0, next_idx = 0;
3897 uint32_t state = 0, conn_err = 0;
3898 uint16_t conn_id;
3899 struct dev_db_entry *fw_ddb_entry;
3900 struct ddb_entry *ddb_entry = NULL;
3901 dma_addr_t fw_ddb_dma;
3902 struct iscsi_cls_session *cls_sess;
3903 struct iscsi_session *sess;
3904 struct iscsi_cls_conn *cls_conn;
3905 struct iscsi_endpoint *ep;
3906 uint16_t cmds_max = 32, tmo = 0;
3907 uint32_t initial_cmdsn = 0;
3908 struct list_head list_st, list_nt; /* List of sendtargets */
3909 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
3910 int fw_idx_size;
3911 unsigned long wtime;
3912 struct qla_ddb_index *nt_ddb_idx;
3913
3914 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3915 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
3916 ha->is_reset = is_reset;
3917 return;
3918 }
3919 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
3920 MAX_DEV_DB_ENTRIES;
3921
3922 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
3923 &fw_ddb_dma);
3924 if (fw_ddb_entry == NULL) {
3925 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
3926 goto exit_ddb_list;
3927 }
3928
3929 INIT_LIST_HEAD(&list_st);
3930 INIT_LIST_HEAD(&list_nt);
3931 fw_idx_size = sizeof(struct qla_ddb_index);
3932
3933 for (idx = 0; idx < max_ddbs; idx = next_idx) {
3934 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
3935 fw_ddb_dma, NULL,
3936 &next_idx, &state, &conn_err,
3937 NULL, &conn_id);
3938 if (ret == QLA_ERROR)
3939 break;
3940
3941 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
3942 goto continue_next_st;
3943
3944 /* Check if ST, add to the list_st */
3945 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
3946 goto continue_next_st;
3947
3948 st_ddb_idx = vzalloc(fw_idx_size);
3949 if (!st_ddb_idx)
3950 break;
3951
3952 st_ddb_idx->fw_ddb_idx = idx;
3953
3954 list_add_tail(&st_ddb_idx->list, &list_st);
3955continue_next_st:
3956 if (next_idx == 0)
3957 break;
3958 }
3959
 3960 /* Before issuing the conn open mailbox command, ensure all IP
 3961 * address states are configured; conn open fails otherwise.
 3962 */
3963 qla4xxx_wait_for_ip_configuration(ha);
3964
3965 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
3966 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
3967 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
3968 }
3969
 3970 /* Wait for all sendtargets to finish, waiting a minimum of 12 sec */
3971 tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
3972 DEBUG2(ql4_printk(KERN_INFO, ha,
3973 "Default time to wait for build ddb %d\n", tmo));
3974
3975 wtime = jiffies + (HZ * tmo);
3976 do {
3977 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
3978 list) {
3979 ret = qla4xxx_get_fwddb_entry(ha,
3980 st_ddb_idx->fw_ddb_idx,
3981 NULL, 0, NULL, &next_idx,
3982 &state, &conn_err, NULL,
3983 NULL);
3984 if (ret == QLA_ERROR)
3985 continue;
3986
3987 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
3988 state == DDB_DS_SESSION_FAILED) {
3989 list_del_init(&st_ddb_idx->list);
3990 vfree(st_ddb_idx);
3991 }
3992 }
3993 schedule_timeout_uninterruptible(HZ / 10);
3994 } while (time_after(wtime, jiffies));
3995
3996 /* Free up the sendtargets list */
3997 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
3998 list_del_init(&st_ddb_idx->list);
3999 vfree(st_ddb_idx);
4000 }
4001
4002 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4003 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
4004 fw_ddb_dma, NULL,
4005 &next_idx, &state, &conn_err,
4006 NULL, &conn_id);
4007 if (ret == QLA_ERROR)
4008 break;
4009
4010 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4011 goto continue_next_nt;
4012
 4013 /* Check if NT, then add it to the list */
4014 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4015 goto continue_next_nt;
4016
4017 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4018 state == DDB_DS_SESSION_FAILED) {
4019 DEBUG2(ql4_printk(KERN_INFO, ha,
4020 "Adding DDB to session = 0x%x\n",
4021 idx));
4022 if (is_reset == INIT_ADAPTER) {
4023 nt_ddb_idx = vmalloc(fw_idx_size);
4024 if (!nt_ddb_idx)
4025 break;
4026
4027 nt_ddb_idx->fw_ddb_idx = idx;
4028
4029 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4030 sizeof(struct dev_db_entry));
4031
4032 if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
4033 fw_ddb_entry) == QLA_SUCCESS) {
4034 vfree(nt_ddb_idx);
4035 goto continue_next_nt;
4036 }
4037 list_add_tail(&nt_ddb_idx->list, &list_nt);
4038 } else if (is_reset == RESET_ADAPTER) {
4039 if (qla4xxx_is_session_exists(ha,
4040 fw_ddb_entry) == QLA_SUCCESS)
4041 goto continue_next_nt;
4042 }
4043
 4044 /* Create the session object with INVALID_ENTRY;
 4045 * the target_id gets set when we issue the login.
 4046 */
4047 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
4048 ha->host, cmds_max,
4049 sizeof(struct ddb_entry),
4050 sizeof(struct ql4_task_data),
4051 initial_cmdsn, INVALID_ENTRY);
4052 if (!cls_sess)
4053 goto exit_ddb_list;
4054
 4055 /*
 4056 * iscsi_session_setup() takes a reference on the driver
 4057 * module, which would prevent the driver from being
 4058 * unloaded, so call module_put() here to drop that
 4059 * reference again.
 4060 */
4061 module_put(qla4xxx_iscsi_transport.owner);
4062 sess = cls_sess->dd_data;
4063 ddb_entry = sess->dd_data;
4064 ddb_entry->sess = cls_sess;
4065
4066 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4067 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4068 sizeof(struct dev_db_entry));
4069
4070 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4071
4072 cls_conn = iscsi_conn_setup(cls_sess,
4073 sizeof(struct qla_conn),
4074 conn_id);
4075 if (!cls_conn)
4076 goto exit_ddb_list;
4077
4078 ddb_entry->conn = cls_conn;
4079
4080 /* Setup ep, for displaying attributes in sysfs */
4081 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4082 if (ep) {
4083 ep->conn = cls_conn;
4084 cls_conn->ep = ep;
4085 } else {
4086 DEBUG2(ql4_printk(KERN_ERR, ha,
4087 "Unable to get ep\n"));
4088 }
4089
4090 /* Update sess/conn params */
4091 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
4092 cls_conn);
4093
4094 if (is_reset == RESET_ADAPTER) {
4095 iscsi_block_session(cls_sess);
 4096 /* Use the relogin path to discover new devices by
 4097 * short-circuiting the logic that arms the relogin
 4098 * timer - instead, set the flags to initiate the
 4099 * login right away.
 4100 */
4101 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4102 set_bit(DF_RELOGIN, &ddb_entry->flags);
4103 }
4104 }
4105continue_next_nt:
4106 if (next_idx == 0)
4107 break;
4108 }
4109exit_ddb_list:
4110 qla4xxx_free_nt_list(&list_nt);
4111 if (fw_ddb_entry)
4112 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4113
4114 qla4xxx_free_ddb_index(ha);
4115}
4116
4117
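qla4xxx_build_ddb_list() above works in two passes over the flash DDBs: skip the boot-target indices, collect and fire the sendtarget entries, wait for IP configuration and for discovery to complete, then walk the DDBs again and create an iSCSI class session/connection for each normal target that is not already known. A compressed control-flow skeleton of that sequence with every firmware and transport call stubbed out:

#include <stdio.h>

/* Stubs standing in for the mailbox and iSCSI-transport calls. */
static int  is_boot_target(int idx)   { return idx == 0; }
static int  is_send_target(int idx)   { return idx == 1; }
static void open_send_target(int idx) { printf("ST %d: conn open\n", idx); }
static int  already_known(int idx)    { (void)idx; return 0; }
static void create_session(int idx)   { printf("NT %d: session created\n", idx); }

int main(void)
{
	int idx, max_ddbs = 4;

	/* Pass 1: kick off send-target discovery. */
	for (idx = 0; idx < max_ddbs; idx++) {
		if (is_boot_target(idx) || !is_send_target(idx))
			continue;
		open_send_target(idx);
	}

	/* (driver: wait for IP config, then poll until discovery completes) */

	/* Pass 2: build sessions for the discovered normal targets. */
	for (idx = 0; idx < max_ddbs; idx++) {
		if (is_boot_target(idx) || is_send_target(idx))
			continue;
		if (already_known(idx))
			continue;
		create_session(idx);
	}
	return 0;
}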
3180/** 4118/**
3181 * qla4xxx_probe_adapter - callback function to probe HBA 4119 * qla4xxx_probe_adapter - callback function to probe HBA
3182 * @pdev: pointer to pci_dev structure 4120 * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3298 * firmware 4236 * firmware
3299 * NOTE: interrupts enabled upon successful completion 4237 * NOTE: interrupts enabled upon successful completion
3300 */ 4238 */
3301 status = qla4xxx_initialize_adapter(ha); 4239 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
3302 while ((!test_bit(AF_ONLINE, &ha->flags)) && 4240 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3303 init_retry_count++ < MAX_INIT_RETRIES) { 4241 init_retry_count++ < MAX_INIT_RETRIES) {
3304 4242
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3319 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 4257 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3320 continue; 4258 continue;
3321 4259
3322 status = qla4xxx_initialize_adapter(ha); 4260 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
3323 } 4261 }
3324 4262
3325 if (!test_bit(AF_ONLINE, &ha->flags)) { 4263 if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3386 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 4324 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3387 ha->patch_number, ha->build_number); 4325 ha->patch_number, ha->build_number);
3388 4326
3389 qla4xxx_create_chap_list(ha);
3390
3391 if (qla4xxx_setup_boot_info(ha)) 4327 if (qla4xxx_setup_boot_info(ha))
3392 ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n", 4328 ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
3393 __func__); 4329 __func__);
3394 4330
4331 /* Perform the build ddb list and login to each */
4332 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
4333 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
4334
4335 qla4xxx_create_chap_list(ha);
4336
3395 qla4xxx_create_ifaces(ha); 4337 qla4xxx_create_ifaces(ha);
3396 return 0; 4338 return 0;
3397 4339
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3449 } 4391 }
3450} 4392}
3451 4393
4394static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
4395{
4396 struct ddb_entry *ddb_entry;
4397 int options;
4398 int idx;
4399
4400 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4401
4402 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4403 if ((ddb_entry != NULL) &&
4404 (ddb_entry->ddb_type == FLASH_DDB)) {
4405
4406 options = LOGOUT_OPTION_CLOSE_SESSION;
4407 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
4408 == QLA_ERROR)
4409 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
4410 __func__);
4411
4412 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
 4413 /*
 4414 * The driver module reference was dropped when the
 4415 * session was set up so that unload stays seamless
 4416 * without destroying the session; take that reference
 4417 * back before tearing the session down here.
 4418 */
4419 try_module_get(qla4xxx_iscsi_transport.owner);
4420 iscsi_destroy_endpoint(ddb_entry->conn->ep);
4421 qla4xxx_free_ddb(ha, ddb_entry);
4422 iscsi_session_teardown(ddb_entry->sess);
4423 }
4424 }
4425}
3452/** 4426/**
3453 * qla4xxx_remove_adapter - callback function to remove adapter. 4427 * qla4xxx_remove_adapter - callback function to remove adapter.
3454 * @pci_dev: PCI device pointer 4428 * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3465 /* destroy iface from sysfs */ 4439 /* destroy iface from sysfs */
3466 qla4xxx_destroy_ifaces(ha); 4440 qla4xxx_destroy_ifaces(ha);
3467 4441
3468 if (ha->boot_kset) 4442 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
3469 iscsi_boot_destroy_kset(ha->boot_kset); 4443 iscsi_boot_destroy_kset(ha->boot_kset);
3470 4444
4445 qla4xxx_destroy_fw_ddb_session(ha);
4446
3471 scsi_remove_host(ha->host); 4447 scsi_remove_host(ha->host);
3472 4448
3473 qla4xxx_free_adapter(ha); 4449 qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
4115 5091
4116 qla4_8xxx_idc_unlock(ha); 5092 qla4_8xxx_idc_unlock(ha);
4117 clear_bit(AF_FW_RECOVERY, &ha->flags); 5093 clear_bit(AF_FW_RECOVERY, &ha->flags);
4118 rval = qla4xxx_initialize_adapter(ha); 5094 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4119 qla4_8xxx_idc_lock(ha); 5095 qla4_8xxx_idc_lock(ha);
4120 5096
4121 if (rval != QLA_SUCCESS) { 5097 if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
4151 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 5127 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4152 QLA82XX_DEV_READY)) { 5128 QLA82XX_DEV_READY)) {
4153 clear_bit(AF_FW_RECOVERY, &ha->flags); 5129 clear_bit(AF_FW_RECOVERY, &ha->flags);
4154 rval = qla4xxx_initialize_adapter(ha); 5130 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4155 if (rval == QLA_SUCCESS) { 5131 if (rval == QLA_SUCCESS) {
4156 ret = qla4xxx_request_irqs(ha); 5132 ret = qla4xxx_request_irqs(ha);
4157 if (ret) { 5133 if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f532..5254e57968f5 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k8" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index d1e95c6ac776..5a35a2a2d3c5 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
147 u8 map_dest; 147 u8 map_dest;
148 u8 spma; 148 u8 spma;
149 u8 probe_tries; 149 u8 probe_tries;
150 u8 priority;
150 u8 dest_addr[ETH_ALEN]; 151 u8 dest_addr[ETH_ALEN];
151 u8 ctl_src_addr[ETH_ALEN]; 152 u8 ctl_src_addr[ETH_ALEN];
152 153
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
301 * @lport: The associated local port 302 * @lport: The associated local port
302 * @fcoe_pending_queue: The pending Rx queue of skbs 303 * @fcoe_pending_queue: The pending Rx queue of skbs
303 * @fcoe_pending_queue_active: Indicates if the pending queue is active 304 * @fcoe_pending_queue_active: Indicates if the pending queue is active
305 * @priority: Packet priority (DCB)
304 * @max_queue_depth: Max queue depth of pending queue 306 * @max_queue_depth: Max queue depth of pending queue
305 * @min_queue_depth: Min queue depth of pending queue 307 * @min_queue_depth: Min queue depth of pending queue
306 * @timer: The queue timer 308 * @timer: The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
316 struct fc_lport *lport; 318 struct fc_lport *lport;
317 struct sk_buff_head fcoe_pending_queue; 319 struct sk_buff_head fcoe_pending_queue;
318 u8 fcoe_pending_queue_active; 320 u8 fcoe_pending_queue_active;
321 u8 priority;
319 u32 max_queue_depth; 322 u32 max_queue_depth;
320 u32 min_queue_depth; 323 u32 min_queue_depth;
321 struct timer_list timer; 324 struct timer_list timer;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3b9df5962c2..58690af323e4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3558,9 +3558,13 @@ static void ring_buffer_wakeup(struct perf_event *event)
3558 3558
3559 rcu_read_lock(); 3559 rcu_read_lock();
3560 rb = rcu_dereference(event->rb); 3560 rb = rcu_dereference(event->rb);
3561 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { 3561 if (!rb)
3562 goto unlock;
3563
3564 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3562 wake_up_all(&event->waitq); 3565 wake_up_all(&event->waitq);
3563 } 3566
3567unlock:
3564 rcu_read_unlock(); 3568 rcu_read_unlock();
3565} 3569}
3566 3570
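The events/core.c hunk adds a NULL check on the RCU-dereferenced ring buffer before walking its event list, jumping to the unlock label when the buffer is already gone. A plain-C sketch of the shape of that fix, with ordinary pointers and a hand-rolled list instead of RCU and list_for_each_entry_rcu():

#include <stdio.h>
#include <stddef.h>

struct event { struct event *next; };
struct ring_buffer { struct event *event_list; };

static void wake(struct event *e) { printf("wake %p\n", (void *)e); }

static void ring_buffer_wakeup(struct ring_buffer *rb)
{
	struct event *e;

	if (!rb)		/* the added guard: the buffer may already be gone */
		goto unlock;

	for (e = rb->event_list; e; e = e->next)
		wake(e);
unlock:
	;			/* rcu_read_unlock() in the kernel version */
}

int main(void)
{
	struct event e2 = { NULL }, e1 = { &e2 };
	struct ring_buffer rb = { &e1 };

	ring_buffer_wakeup(&rb);
	ring_buffer_wakeup(NULL);	/* previously this would have dereferenced NULL */
	return 0;
}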
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a78ed2736ba7..8a39fa3e3c6c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2352,13 +2352,11 @@ again:
2352 if (!smt && (sd->flags & SD_SHARE_CPUPOWER)) 2352 if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
2353 continue; 2353 continue;
2354 2354
2355 if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) { 2355 if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
2356 if (!smt) { 2356 break;
2357 smt = 1; 2357
2358 goto again; 2358 if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
2359 }
2360 break; 2359 break;
2361 }
2362 2360
2363 sg = sd->groups; 2361 sg = sd->groups;
2364 do { 2362 do {
@@ -2378,6 +2376,10 @@ next:
2378 sg = sg->next; 2376 sg = sg->next;
2379 } while (sg != sd->groups); 2377 } while (sg != sd->groups);
2380 } 2378 }
2379 if (!smt) {
2380 smt = 1;
2381 goto again;
2382 }
2381done: 2383done:
2382 rcu_read_unlock(); 2384 rcu_read_unlock();
2383 2385
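The sched_fair.c hunk moves the "retry with SMT" fallback out of the domain loop: rather than flipping smt and jumping back as soon as a non-package-sharing domain is seen, the scan now finishes its non-SMT pass and only then retries once with smt = 1. A tiny sketch of that two-pass goto pattern, with a single flag standing in for the SD_SHARE_CPUPOWER checks:

#include <stdio.h>

int main(void)
{
	int smt = 0, i;

	/* Stand-ins for the scheduling domains being walked. */
	const int share_cpupower[] = { 1, 0, 0 };	/* 1 = SMT-level domain */
	const int ndom = 3;

again:
	for (i = 0; i < ndom; i++) {
		if (!smt && share_cpupower[i])
			continue;	/* skip SMT domains on the first pass */
		if (smt && !share_cpupower[i])
			break;		/* second pass only looks at SMT domains */
		printf("pass %d: scanning domain %d\n", smt, i);
	}
	if (!smt) {
		smt = 1;		/* retry once, now including SMT domains */
		goto again;
	}
	return 0;
}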