-rw-r--r--  MAINTAINERS                             |   2
-rw-r--r--  arch/um/drivers/line.c                  |   1
-rw-r--r--  arch/um/os-Linux/helper.c               |   1
-rw-r--r--  drivers/firewire/core-iso.c             |  14
-rw-r--r--  drivers/firewire/ohci.c                 |  23
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 151
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |  22
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c    |  54
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |  13
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    |  11
-rw-r--r--  drivers/md/raid5.c                      |  35
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c          |   2
-rw-r--r--  include/linux/firewire-cdev.h           |   2
-rw-r--r--  include/linux/firewire-constants.h      |   2

18 files changed, 293 insertions, 77 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index a2d9254a2233..183887518fe3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1960,7 +1960,7 @@ F: lib/kobj*
 
 DRM DRIVERS
 M: David Airlie <airlied@linux.ie>
-L: dri-devel@lists.sourceforge.net
+L: dri-devel@lists.freedesktop.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
 S: Maintained
 F: drivers/gpu/drm/
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 64cda95f59ca..7a656bd8bd3c 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -6,6 +6,7 @@
 #include "linux/irqreturn.h"
 #include "linux/kd.h"
 #include "linux/sched.h"
+#include "linux/slab.h"
 #include "chan_kern.h"
 #include "irq_kern.h"
 #include "irq_user.h"
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 06d6ccf0e444..b6b1096152aa 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <sched.h>
 #include <linux/limits.h>
-#include <linux/slab.h>
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include "kern_constants.h"
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 3784a47865b7..8f5aebfb29df 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-                       break;
+                       return -EBUSY;
 
                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                          u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
        __be32 c, all, old;
-       int i, retry = 5;
+       int i, ret = -EIO, retry = 5;
 
        old = all = allocate ? cpu_to_be32(~0) : 0;
 
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                if (!(channels_mask & 1 << i))
                        continue;
 
+               ret = -EBUSY;
+
                c = cpu_to_be32(1 << (31 - i));
                if ((old & c) != (all & c))
                        continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 
                /* 1394-1995 IRM, fall through to retry. */
                default:
-                       if (retry--)
+                       if (retry) {
+                               retry--;
                                i--;
+                       } else {
+                               ret = -EIO;
+                       }
                }
        }
 
-       return -EIO;
+       return ret;
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
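Note on the core-iso.c hunks above: both allocation paths now report why they failed. manage_bandwidth() returns -EBUSY instead of silently breaking out when the requested bandwidth does not fit, and manage_channel() tracks a return code so that the caller gets -EBUSY when every requested channel turned out to be taken and -EIO only when there was nothing to try or the IRM transaction kept failing. The fragment below is only an illustration of that error-reporting pattern in plain C; claim_first_free() and try_claim() are made-up names, not part of the firewire core.

    /* Illustrative sketch: return -EBUSY once at least one candidate was
     * tried and found taken, -EIO when there was nothing to try at all.
     */
    static int claim_first_free(unsigned int mask, int (*try_claim)(int bit))
    {
            int i, ret = -EIO;              /* nothing in the mask at all */

            for (i = 0; i < 32; i++) {
                    if (!(mask & (1u << i)))
                            continue;

                    ret = -EBUSY;           /* a candidate existed but may be taken */
                    if (try_claim(i) == 0)
                            return i;       /* success: claimed bit i */
            }
            return ret;
    }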
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 0cf4d7f562c5..94b16e0340ae 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
                              struct fw_packet *packet, u32 csr)
 {
        struct fw_packet response;
-       int tcode, length, ext_tcode, sel;
+       int tcode, length, ext_tcode, sel, try;
        __be32 *payload, lock_old;
        u32 lock_arg, lock_data;
 
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
        reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
        reg_write(ohci, OHCI1394_CSRControl, sel);
 
-       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-       else
-               fw_notify("swap not done yet\n");
+       for (try = 0; try < 20; try++)
+               if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+                       lock_old = cpu_to_be32(reg_read(ohci,
+                                                       OHCI1394_CSRData));
+                       fw_fill_response(&response, packet->header,
+                                        RCODE_COMPLETE,
+                                        &lock_old, sizeof(lock_old));
+                       goto out;
+               }
+
+       fw_error("swap not done (CSR lock timeout)\n");
+       fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
-       fw_fill_response(&response, packet->header,
-                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
 out:
        fw_core_handle_response(&ohci->card, &response);
 }
 
 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-       u64 offset;
-       u32 csr;
+       u64 offset, csr;
 
        if (ctx == &ctx->ohci->at_request_ctx) {
                packet->ack = ACK_PENDING;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc93939507d..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
        dev_priv->cfb_size = size;
 
+       dev_priv->compressed_fb = compressed_fb;
+
        if (IS_GM45(dev)) {
                g4x_disable_fbc(dev);
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                i8xx_disable_fbc(dev);
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
+               dev_priv->compressed_llb = compressed_llb;
        }
 
        DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                  ll_base, size >> 20);
 }
 
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       drm_mm_put_block(dev_priv->compressed_fb);
+       if (!IS_GM45(dev))
+               drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
+               if (I915_HAS_FBC(dev) && i915_powersave)
+                       i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->vram);
                i915_gem_lastclose(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0af3dcc85ce9..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };
 
 const static struct intel_device_info intel_i85x_info = {
-       .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .cursor_needs_physical = 1,
 };
 
 const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-       INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+       INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6960849522f8..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
 struct intel_device_info {
        u8 is_mobile : 1;
        u8 is_i8xx : 1;
+       u8 is_i85x : 1;
        u8 is_i915g : 1;
        u8 is_i9xx : 1;
        u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
+       void *seqno_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
        unsigned int status_gfx_addr;
+       unsigned int seqno_gfx_addr;
        drm_local_map_t hws_map;
        struct drm_gem_object *hws_obj;
+       struct drm_gem_object *seqno_obj;
        struct drm_gem_object *pwrctx;
 
        struct resource mch_res;
@@ -630,6 +634,9 @@ typedef struct drm_i915_private {
        u8 max_delay;
 
        enum no_fbc_reason no_fbc_reason;
+
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
                            IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80871c62a571..7f52cc124cfe 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        }
 }
 
+#define PIPE_CONTROL_FLUSH(addr) \
+       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+                PIPE_CONTROL_DEPTH_STALL); \
+       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+       OUT_RING(0); \
+       OUT_RING(0); \
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(seqno);
+       if (HAS_PIPE_CONTROL(dev)) {
+               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
+               /*
+                * Workaround qword write incoherence by flushing the
+                * PIPE_NOTIFY buffers out to memory before requesting
+                * an interrupt.
+                */
+               BEGIN_LP_RING(32);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128; /* write to separate cachelines */
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                        PIPE_CONTROL_NOTIFY);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       } else {
+               BEGIN_LP_RING(4);
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(seqno);
+
+               OUT_RING(MI_USER_INTERRUPT);
+               ADVANCE_LP_RING();
+       }
 
        DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       if (IS_I965G(dev))
+               return ((volatile u32 *)(dev_priv->seqno_page))[0];
+       else
+               return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 }
 
 /**
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        pitch_val = obj_priv->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;
 
+       if (obj_priv->tiling_mode == I915_TILING_Y &&
+           HAS_128_BYTE_Y_TILING(dev))
+               WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+       else
+               WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
        return 0;
 }
 
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       obj = drm_gem_object_alloc(dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate seqno page\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       obj_priv = to_intel_bo(obj);
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret)
+               goto err_unref;
+
+       dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+       dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+       if (dev_priv->seqno_page == NULL)
+               goto err_unpin;
+
+       dev_priv->seqno_obj = obj;
+       memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return ret;
+}
+
 static int
 i915_gem_init_hws(struct drm_device *dev)
 {
@@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev)
        obj = drm_gem_object_alloc(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev)
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
-               return ret;
+               goto err_unref;
        }
 
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_unpin;
        }
+
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       goto err_unpin;
+       }
+
        dev_priv->hws_obj = obj;
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
        DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
        return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return 0;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = dev_priv->seqno_obj;
+       obj_priv = to_intel_bo(obj);
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->seqno_obj = NULL;
+
+       dev_priv->seqno_page = NULL;
 }
 
 static void
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        dev_priv->hw_status_page = NULL;
 
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+
        /* Write high address into HWS_PGA when disabling. */
        I915_WRITE(HWS_PGA, 0x1ffff000);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 449157f71610..4bdccefcf2cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                 * reg, so dont bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (IS_I9XX(dev)) {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
-               /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
-                * instead of 4 (2KB) on 945s.
-                */
-               if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 20))
+       } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               if (stride > 8192)
                        return false;
-       } else {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-               if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 19))
-                       return false;
+               if (IS_GEN3(dev)) {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+                               return false;
+               } else {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+                               return false;
+               }
        }
 
        /* 965+ just needs multiples of tile width */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6421481d6222..2b8b969d0c15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                        READ_BREADCRUMB(dev_priv);
        }
 
-       if (gt_iir & GT_USER_INTERRUPT) {
+       if (gt_iir & GT_PIPE_NOTIFY) {
                u32 seqno = i915_get_gem_seqno(dev);
                dev_priv->mm.irq_gem_seqno = seqno;
                trace_i915_gem_request_complete(dev, seqno);
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-       u32 render_mask = GT_USER_INTERRUPT;
+       u32 render_mask = GT_PIPE_NOTIFY;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        struct drm_connector *connector;
+       acpi_handle handle;
+       struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+       unsigned long long device_id;
+       acpi_status status;
        int i = 0;
 
+       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+               return;
+
+       if (acpi_is_video_device(acpi_dev))
+               acpi_video_bus = acpi_dev;
+       else {
+               list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+                       if (acpi_is_video_device(acpi_cdev)) {
+                               acpi_video_bus = acpi_cdev;
+                               break;
+                       }
+               }
+       }
+
+       if (!acpi_video_bus) {
+               printk(KERN_WARNING "No ACPI video bus found\n");
+               return;
+       }
+
+       list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+               if (i >= 8) {
+                       dev_printk (KERN_ERR, &dev->pdev->dev,
+                                   "More than 8 outputs detected\n");
+                       return;
+               }
+               status =
+                       acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+                                             NULL, &device_id);
+               if (ACPI_SUCCESS(status)) {
+                       if (!device_id)
+                               goto blind_set;
+                       opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+                       i++;
+               }
+       }
+
+end:
+       /* If fewer than 8 outputs, the list must be null terminated */
+       if (i < 8)
+               opregion->acpi->didl[i] = 0;
+       return;
+
+blind_set:
+       i = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                int output_type = ACPI_OTHER_OUTPUT;
                if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
                opregion->acpi->didl[i] |= (1<<31) | output_type | i;
                i++;
        }
-
-       /* If fewer than 8 outputs, the list must be null terminated */
-       if (i < 8)
-               opregion->acpi->didl[i] = 0;
+       goto end;
 }
 
 int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f56dfa..4cbc5210fd30 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
 #define ASYNC_FLIP (1<<22)
 #define DISPLAY_PLANE_A (0<<20)
 #define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
 
 /*
  * Fence registers
@@ -241,7 +251,7 @@
 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
 #define I830_FENCE_PITCH_SHIFT 4
 #define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
 #define I830_FENCE_MAX_PITCH_VAL 6
 #define I830_FENCE_MAX_SIZE_VAL (1<<8)
 
@@ -2285,6 +2295,7 @@
 #define DEIER 0x4400c
 
 /* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
 #define GT_SYNC_STATUS (1 << 2)
 #define GT_USER_INTERRUPT (1 << 0)
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7356fb6c918..c7502b6b1600 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
                        dev_priv->display.update_wm = g4x_update_wm;
                else if (IS_I965G(dev))
                        dev_priv->display.update_wm = i965_update_wm;
-               else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+               else if (IS_I9XX(dev)) {
                        dev_priv->display.update_wm = i9xx_update_wm;
                        dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+               } else if (IS_I85X(dev)) {
+                       dev_priv->display.update_wm = i9xx_update_wm;
+                       dev_priv->display.get_fifo_size = i85x_get_fifo_size;
                } else {
-                       if (IS_I85X(dev))
-                               dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-                       else if (IS_845G(dev))
+                       dev_priv->display.update_wm = i830_update_wm;
+                       if (IS_845G(dev))
                                dev_priv->display.get_fifo_size = i845_get_fifo_size;
                        else
                                dev_priv->display.get_fifo_size = i830_get_fifo_size;
-                       dev_priv->display.update_wm = i830_update_wm;
                }
        }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20e48401910e..58ea0ecae7c3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1650,7 +1650,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                     int previous, int *dd_idx,
                                     struct stripe_head *sh)
 {
-       sector_t stripe;
+       sector_t stripe, stripe2;
        sector_t chunk_number;
        unsigned int chunk_offset;
        int pd_idx, qd_idx;
@@ -1677,7 +1677,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
         */
        stripe = chunk_number;
        *dd_idx = sector_div(stripe, data_disks);
-
+       stripe2 = stripe;
        /*
         * Select the parity disk based on the user selected algorithm.
         */
@@ -1689,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
        case 5:
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = data_disks - stripe % raid_disks;
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = data_disks - stripe % raid_disks;
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_PARITY_0:
@@ -1723,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1732,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1741,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
@@ -1765,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                        /* Exactly the same as RIGHT_ASYMMETRIC, but or
                         * of blocks for computing Q is different.
                         */
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1780,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                         * D D D P Q rather than
                         * Q D D D P
                         */
-                       pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+                       stripe2 += 1;
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1792,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_ROTATING_N_CONTINUE:
                        /* Same as left_symmetric but Q is before P */
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        ddf_layout = 1;
@@ -1800,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                        /* RAID5 left_asymmetric, with Q on last device */
-                       pd_idx = data_disks - stripe % (raid_disks-1);
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
-                       pd_idx = stripe % (raid_disks-1);
+                       pd_idx = sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_LEFT_SYMMETRIC_6:
-                       pd_idx = data_disks - stripe % (raid_disks-1);
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_SYMMETRIC_6:
-                       pd_idx = stripe % (raid_disks-1);
+                       pd_idx = sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 09d6d4b76f39..caeb7d10ae04 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
        if (conn_err_detail)
                *conn_err_detail = mbox_sts[5];
        if (tcp_source_port_num)
-               *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16;
+               *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
        if (connection_id)
                *connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
        status = QLA_SUCCESS;
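Note on the qla4xxx one-liner above: it is an operator-precedence fix. A cast binds tighter than `>>`, so `(uint16_t) mbox_sts[6] >> 16` truncates to the low 16 bits first and then shifts the result to zero; the added parentheses shift the full 32-bit word before truncating. A tiny self-contained illustration in plain C (the mailbox value is hypothetical, not driver data):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mbox = 0xABCD1234;     /* hypothetical: port number in the high half */

            uint16_t wrong = (uint16_t) mbox >> 16;         /* cast first: 0x1234 >> 16 == 0 */
            uint16_t right = (uint16_t) (mbox >> 16);       /* shift first: 0xABCD */

            printf("wrong=0x%04x right=0x%04x\n", wrong, right);
            return 0;
    }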
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 81f3b14d5d76..68f883b30a53 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -17,7 +17,7 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
index 9c63f06e67f2..9b4bb5fbba4b 100644
--- a/include/linux/firewire-constants.h
+++ b/include/linux/firewire-constants.h
@@ -17,7 +17,7 @@
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
