Diffstat (limited to 'drivers')
196 files changed, 1688 insertions, 1501 deletions
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
         ACPI_COMPANION_SET(dev, adev);
         dev->release = acpi_container_release;
         ret = device_register(dev);
-        if (ret)
+        if (ret) {
+                put_device(dev);
                 return ret;
-
+        }
         adev->driver_data = dev;
         return 1;
 }
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..e9b3081c4fe9 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
 static void dock_notify(struct dock_station *ds, u32 event)
 {
         acpi_handle handle = ds->handle;
-        struct acpi_device *ad;
+        struct acpi_device *adev = NULL;
         int surprise_removal = 0;
 
         /*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
         switch (event) {
         case ACPI_NOTIFY_BUS_CHECK:
         case ACPI_NOTIFY_DEVICE_CHECK:
-                if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+                acpi_bus_get_device(handle, &adev);
+                if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
                         begin_dock(ds);
                         dock(ds);
                         if (!dock_present(ds)) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..52b8181ddafd 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4126,12 +4126,14 @@ static int mv_platform_probe(struct platform_device *pdev)
                 clk_prepare_enable(hpriv->port_clks[port]);
 
                 sprintf(port_number, "port%d", port);
-                hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+                hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+                                                               port_number);
                 if (IS_ERR(hpriv->port_phys[port])) {
                         rc = PTR_ERR(hpriv->port_phys[port]);
                         hpriv->port_phys[port] = NULL;
-                        if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
-                                dev_warn(&pdev->dev, "error getting phy");
+                        if (rc != -EPROBE_DEFER)
+                                dev_warn(&pdev->dev, "error getting phy %d",
+                                         rc);
                         goto err;
                 } else
                         phy_power_on(hpriv->port_phys[port]);
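
Note: devm_phy_optional_get() turns a missing PHY (the -ENODEV case of devm_phy_get()) into a NULL handle, which is why the explicit -ENODEV check above can go away; the phy_* helpers accept a NULL phy. A minimal illustrative sketch of the resulting probe pattern (hypothetical driver code, not part of this series):

    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>

    /* Sketch only: names such as example_probe and "port0" are stand-ins. */
    static int example_probe(struct platform_device *pdev)
    {
            struct phy *phy;

            /* NULL (not an error) when no PHY is described for "port0". */
            phy = devm_phy_optional_get(&pdev->dev, "port0");
            if (IS_ERR(phy))
                    return PTR_ERR(phy);    /* e.g. -EPROBE_DEFER */

            /* phy_power_on(NULL) is a no-op, so no special casing is needed. */
            return phy_power_on(phy);
    }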
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
                 goto out;
         }
 
+        if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
         /* Found all components */
         ret = master->ops->bind(master->dev);
         if (ret < 0) {
+                devres_release_group(master->dev, NULL);
+                dev_info(master->dev, "master bind failed: %d\n", ret);
                 master_remove_components(master);
                 goto out;
         }
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
 {
         if (master->bound) {
                 master->ops->unbind(master->dev);
+                devres_release_group(master->dev, NULL);
                 master->bound = false;
         }
 
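
The devres_open_group()/devres_release_group() pair added above brackets every managed allocation made during the master's ->bind() so it can be dropped as one unit on bind failure or unbind. A rough sketch of that bracketing pattern (illustrative only; bring_up() and my_bind() are stand-ins):

    #include <linux/device.h>

    static int my_bind(struct device *dev);   /* stand-in for master->ops->bind() */

    static int bring_up(struct device *dev)
    {
            int ret;

            if (!devres_open_group(dev, NULL, GFP_KERNEL))
                    return -ENOMEM;

            ret = my_bind(dev);     /* devm_* allocations land in the open group */
            if (ret < 0)
                    devres_release_group(dev, NULL);   /* undo them on failure */
            return ret;
    }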
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
         NULL_IRQ_NONE     = 0,
         NULL_IRQ_SOFTIRQ  = 1,
         NULL_IRQ_TIMER    = 2,
+};
 
+enum {
         NULL_Q_BIO        = 0,
         NULL_Q_RQ         = 1,
         NULL_Q_MQ         = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
-        if (cmd->rq) {
-                if (queue_mode == NULL_Q_MQ)
-                        blk_mq_end_io(cmd->rq, 0);
-                else {
-                        INIT_LIST_HEAD(&cmd->rq->queuelist);
-                        blk_end_request_all(cmd->rq, 0);
-                }
-        } else if (cmd->bio)
+        switch (queue_mode) {
+        case NULL_Q_MQ:
+                blk_mq_end_io(cmd->rq, 0);
+                return;
+        case NULL_Q_RQ:
+                INIT_LIST_HEAD(&cmd->rq->queuelist);
+                blk_end_request_all(cmd->rq, 0);
+                break;
+        case NULL_Q_BIO:
                 bio_endio(cmd->bio, 0);
+                break;
+        }
 
-        if (queue_mode != NULL_Q_MQ)
-                free_cmd(cmd);
+        free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
         cq = &per_cpu(completion_queues, smp_processor_id());
 
         while ((entry = llist_del_all(&cq->list)) != NULL) {
+                entry = llist_reverse_order(entry);
                 do {
                         cmd = container_of(entry, struct nullb_cmd, ll_list);
                         end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
 
 static void null_softirq_done_fn(struct request *rq)
 {
-        blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
-        struct completion_queue *cq;
-        struct llist_node *entry, *next;
-        struct nullb_cmd *cmd;
-
-        cq = &per_cpu(completion_queues, smp_processor_id());
-
-        entry = llist_del_all(&cq->list);
-
-        while (entry) {
-                next = entry->next;
-                cmd = llist_entry(entry, struct nullb_cmd, ll_list);
-                end_cmd(cmd);
-                entry = next;
-        }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
-        struct call_single_data *data = &cmd->csd;
-        int cpu = get_cpu();
-        struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
-        cmd->ll_list.next = NULL;
-
-        if (llist_add(&cmd->ll_list, &cq->list)) {
-                data->func = null_ipi_cmd_end_io;
-                data->flags = 0;
-                __smp_call_function_single(cpu, data, 0);
-        }
-
-        put_cpu();
+        end_cmd(rq->special);
 }
 
-#endif /* CONFIG_SMP */
-
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
 {
         /* Complete IO by inline, softirq or timer */
         switch (irqmode) {
-        case NULL_IRQ_NONE:
-                end_cmd(cmd);
-                break;
         case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
-                null_cmd_end_ipi(cmd);
-#else
+                switch (queue_mode) {
+                case NULL_Q_MQ:
+                        blk_mq_complete_request(cmd->rq);
+                        break;
+                case NULL_Q_RQ:
+                        blk_complete_request(cmd->rq);
+                        break;
+                case NULL_Q_BIO:
+                        /*
+                         * XXX: no proper submitting cpu information available.
+                         */
+                        end_cmd(cmd);
+                        break;
+                }
+                break;
+        case NULL_IRQ_NONE:
                 end_cmd(cmd);
-#endif
                 break;
         case NULL_IRQ_TIMER:
                 null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
         .queue_rq  = null_queue_rq,
         .map_queue = blk_mq_map_queue,
         .init_hctx = null_init_hctx,
+        .complete  = null_softirq_done_fn,
 };
 
 static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
 {
         unsigned int i;
 
-#if !defined(CONFIG_SMP)
-        if (irqmode == NULL_IRQ_SOFTIRQ) {
-                pr_warn("null_blk: softirq completions not available.\n");
-                pr_warn("null_blk: using direct completions.\n");
-                irqmode = NULL_IRQ_NONE;
-        }
-#endif
         if (bs > PAGE_SIZE) {
                 pr_warn("null_blk: invalid block size\n");
                 pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
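
On the llist_reverse_order() addition above: llist_del_all() detaches the lock-free list in LIFO order (newest entry first), so reversing it restores submission (FIFO) order before the commands are completed. A small illustrative sketch of that drain pattern, with "struct item" and complete_item() as stand-ins:

    #include <linux/llist.h>

    struct item {
            struct llist_node ll_node;
            /* ... payload ... */
    };

    static void complete_item(struct item *it);   /* stand-in completion */

    static void drain_fifo(struct llist_head *list)
    {
            struct llist_node *node = llist_del_all(list);  /* newest-first */

            node = llist_reverse_order(node);               /* now oldest-first */
            while (node) {
                    struct item *it = llist_entry(node, struct item, ll_node);

                    node = node->next;   /* read before the item may be freed */
                    complete_item(it);
            }
    }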
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
         return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
 }
 
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
 {
-        struct request *req = vbr->req;
+        struct virtblk_req *vbr = req->special;
         int error = virtblk_result(vbr);
 
         if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
         do {
                 virtqueue_disable_cb(vq);
                 while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-                        virtblk_request_done(vbr);
+                        blk_mq_complete_request(vbr->req);
                         req_done = true;
                 }
                 if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
         .map_queue  = blk_mq_map_queue,
         .alloc_hctx = blk_mq_alloc_single_hw_queue,
         .free_hctx  = blk_mq_free_single_hw_queue,
+        .complete   = virtblk_request_done,
 };
 
 static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -299,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
         BUG_ON(num != 0);
 }
 
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
 {
         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -375,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 
         pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
 
-        INIT_LIST_HEAD(&blkif->persistent_purge_list);
+        BUG_ON(!list_empty(&blkif->persistent_purge_list));
         root = &blkif->persistent_gnts;
 purge_list:
         foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -420,7 +420,6 @@ finished:
         blkif->vbd.overflow_max_grants = 0;
 
         /* We can defer this work */
-        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
         schedule_work(&blkif->persistent_purge_work);
         pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
         return;
@@ -625,9 +624,23 @@ purge_gnt_list:
                 print_stats(blkif);
         }
 
-        /* Since we are shutting down remove all pages from the buffer */
-        shrink_free_pagepool(blkif, 0 /* All */);
+        /* Drain pending purge work */
+        flush_work(&blkif->persistent_purge_work);
 
+        if (log_stats)
+                print_stats(blkif);
+
+        blkif->xenblkd = NULL;
+        xen_blkif_put(blkif);
+
+        return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
         /* Free all persistent grant pages */
         if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                 free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -636,13 +649,8 @@ purge_gnt_list:
         BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
         blkif->persistent_gnt_c = 0;
 
-        if (log_stats)
-                print_stats(blkif);
-
-        blkif->xenblkd = NULL;
-        xen_blkif_put(blkif);
-
-        return 0;
+        /* Since we are shutting down remove all pages from the buffer */
+        shrink_free_pagepool(blkif, 0 /* All */);
 }
 
 /*
@@ -838,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
         struct grant_page **pages = pending_req->indirect_pages;
         struct xen_blkif *blkif = pending_req->blkif;
         int indirect_grefs, rc, n, nseg, i;
-        struct blkif_request_segment_aligned *segments = NULL;
+        struct blkif_request_segment *segments = NULL;
 
         nseg = pending_req->nr_pages;
         indirect_grefs = INDIRECT_PAGES(nseg);
@@ -934,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
         atomic_set(&blkif->drain, 1);
         do {
-                /* The initial value is one, and one refcnt taken at the
-                 * start of the xen_blkif_schedule thread. */
-                if (atomic_read(&blkif->refcnt) <= 2)
+                if (atomic_read(&blkif->inflight) == 0)
                         break;
                 wait_for_completion_interruptible_timeout(
                                 &blkif->drain_complete, HZ);
@@ -976,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
          * the proper response on the ring.
          */
         if (atomic_dec_and_test(&pending_req->pendcnt)) {
-                xen_blkbk_unmap(pending_req->blkif,
+                struct xen_blkif *blkif = pending_req->blkif;
+
+                xen_blkbk_unmap(blkif,
                                 pending_req->segments,
                                 pending_req->nr_pages);
-                make_response(pending_req->blkif, pending_req->id,
+                make_response(blkif, pending_req->id,
                               pending_req->operation, pending_req->status);
-                xen_blkif_put(pending_req->blkif);
-                if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
-                        if (atomic_read(&pending_req->blkif->drain))
-                                complete(&pending_req->blkif->drain_complete);
+                free_req(blkif, pending_req);
+                /*
+                 * Make sure the request is freed before releasing blkif,
+                 * or there could be a race between free_req and the
+                 * cleanup done in xen_blkif_free during shutdown.
+                 *
+                 * NB: The fact that we might try to wake up pending_free_wq
+                 * before drain_complete (in case there's a drain going on)
+                 * it's not a problem with our current implementation
+                 * because we can assure there's no thread waiting on
+                 * pending_free_wq if there's a drain going on, but it has
+                 * to be taken into account if the current model is changed.
+                 */
+                if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+                        complete(&blkif->drain_complete);
                 }
-                free_req(pending_req->blkif, pending_req);
+                xen_blkif_put(blkif);
         }
 }
 
@@ -1240,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
          * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
          */
         xen_blkif_get(blkif);
+        atomic_inc(&blkif->inflight);
 
         for (i = 0; i < nseg; i++) {
                 while ((bio == NULL) ||
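
The new inflight counter gives the drain path something dedicated to wait on instead of overloading the reference count: each submitted request increments it, and the final completion decrements it and, if a drain is in progress, signals drain_complete. The general shape of that idiom, independent of blkback (illustrative; struct ctx and the function names are stand-ins):

    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct ctx {
            atomic_t inflight;
            atomic_t draining;
            struct completion drained;
    };

    static void submit_one(struct ctx *c)
    {
            atomic_inc(&c->inflight);
            /* ... hand the request to the backend ... */
    }

    static void complete_one(struct ctx *c)
    {
            /* ... per-request completion work ... */
            if (atomic_dec_and_test(&c->inflight) && atomic_read(&c->draining))
                    complete(&c->drained);
    }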
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
 #define MAX_INDIRECT_SEGMENTS 256
 
 #define SEGS_PER_INDIRECT_FRAME \
-        (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+        (PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define MAX_INDIRECT_PAGES \
         ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 #define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
         /* for barrier (drain) requests */
         struct completion       drain_complete;
         atomic_t                drain;
+        atomic_t                inflight;
         /* One thread per one blkif. */
         struct task_struct      *xenblkd;
         unsigned int            waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                               struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 int xen_blkbk_barrier(struct xenbus_transaction xbt,
                       struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
 
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                         struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
         blkif->persistent_gnts.rb_node = NULL;
         spin_lock_init(&blkif->free_pages_lock);
         INIT_LIST_HEAD(&blkif->free_pages);
+        INIT_LIST_HEAD(&blkif->persistent_purge_list);
         blkif->free_pages_num = 0;
         atomic_set(&blkif->persistent_gnt_in_use, 0);
+        atomic_set(&blkif->inflight, 0);
+        INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
 
         INIT_LIST_HEAD(&blkif->pending_free);
 
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
         if (!atomic_dec_and_test(&blkif->refcnt))
                 BUG();
 
+        /* Remove all persistent grants and the cache of ballooned pages. */
+        xen_blkbk_free_caches(blkif);
+
+        /* Make sure everything is drained before shutting down */
+        BUG_ON(blkif->persistent_gnt_c != 0);
+        BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+        BUG_ON(blkif->free_pages_num != 0);
+        BUG_ON(!list_empty(&blkif->persistent_purge_list));
+        BUG_ON(!list_empty(&blkif->free_pages));
+        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
         /* Check that there is no request in use */
         list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
                 list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define DEV_NAME        "xvd"   /* name in /dev */
 
 #define SEGS_PER_INDIRECT_FRAME \
-        (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+        (PAGE_SIZE/sizeof(struct blkif_request_segment))
 #define INDIRECT_GREFS(_segs) \
         ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
         unsigned long id;
         unsigned int fsect, lsect;
         int i, ref, n;
-        struct blkif_request_segment_aligned *segments = NULL;
+        struct blkif_request_segment *segments = NULL;
 
         /*
          * Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
                         } else {
                                 n = i % SEGS_PER_INDIRECT_FRAME;
                                 segments[n] =
-                                        (struct blkif_request_segment_aligned) {
+                                        (struct blkif_request_segment) {
                                                         .gref       = ref,
                                                         .first_sect = fsect,
                                                         .last_sect  = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
         case XenbusStateReconfiguring:
         case XenbusStateReconfigured:
         case XenbusStateUnknown:
-        case XenbusStateClosed:
                 break;
 
         case XenbusStateConnected:
                 blkfront_connect(info);
                 break;
 
+        case XenbusStateClosed:
+                if (dev->state == XenbusStateClosed)
+                        break;
+                /* Missed the backend's Closing state -- fallthrough */
         case XenbusStateClosing:
                 blkfront_closing(info);
                 break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
 config MAX_RAW_DEVS
         int "Maximum number of RAW devices to support (1-65536)"
         depends on RAW_DRIVER
+        range 1 65536
         default "256"
         help
           The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
         struct raw_device_data *rawdev;
         struct block_device *bdev;
 
-        if (number <= 0 || number >= MAX_RAW_MINORS)
+        if (number <= 0 || number >= max_raw_minors)
                 return -EINVAL;
 
         rawdev = &raw_devices[number];
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 974b2db2fe10..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
         return;
 }
 
-static void __init kona_timers_init(struct device_node *node)
-{
-        u32 freq;
-        struct clk *external_clk;
-
-        external_clk = of_clk_get_by_name(node, NULL);
-
-        if (!IS_ERR(external_clk)) {
-                arch_timer_rate = clk_get_rate(external_clk);
-                clk_prepare_enable(external_clk);
-        } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
-                arch_timer_rate = freq;
-        } else {
-                panic("unable to determine clock-frequency");
-        }
-
-        /* Setup IRQ numbers */
-        timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
-        /* Setup IO addresses */
-        timers.tmr_regs = of_iomap(node, 0);
-
-        kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
 static int kona_timer_set_next_event(unsigned long clc,
                                      struct clock_event_device *unused)
 {
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = {
 
 static void __init kona_timer_init(struct device_node *node)
 {
-        kona_timers_init(node);
+        u32 freq;
+        struct clk *external_clk;
+
+        if (!of_device_is_available(node)) {
+                pr_info("Kona Timer v1 marked as disabled in device tree\n");
+                return;
+        }
+
+        external_clk = of_clk_get_by_name(node, NULL);
+
+        if (!IS_ERR(external_clk)) {
+                arch_timer_rate = clk_get_rate(external_clk);
+                clk_prepare_enable(external_clk);
+        } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+                arch_timer_rate = freq;
+        } else {
+                pr_err("Kona Timer v1 unable to determine clock-frequency");
+                return;
+        }
+
+        /* Setup IRQ numbers */
+        timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+        /* Setup IO addresses */
+        timers.tmr_regs = of_iomap(node, 0);
+
+        kona_timer_disable_and_clear(timers.tmr_regs);
+
         kona_timer_clockevents_init();
         setup_irq(timers.tmr_irq, &kona_timer_irq);
         kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 79606f473f48..c788abf1c457 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -51,8 +51,6 @@ static inline int32_t div_fp(int32_t x, int32_t y)
         return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
 }
 
-static u64 energy_divisor;
-
 struct sample {
         int32_t core_pct_busy;
         u64 aperf;
@@ -630,12 +628,10 @@ static void intel_pstate_timer_func(unsigned long __data)
 {
         struct cpudata *cpu = (struct cpudata *) __data;
         struct sample *sample;
-        u64 energy;
 
         intel_pstate_sample(cpu);
 
         sample = &cpu->samples[cpu->sample_ptr];
-        rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
 
         intel_pstate_adjust_busy_pstate(cpu);
 
@@ -644,7 +640,6 @@ static void intel_pstate_timer_func(unsigned long __data)
                         cpu->pstate.current_pstate,
                         sample->mperf,
                         sample->aperf,
-                        div64_u64(energy, energy_divisor),
                         sample->freq);
 
         intel_pstate_set_sample_time(cpu);
@@ -926,7 +921,6 @@ static int __init intel_pstate_init(void)
         int cpu, rc = 0;
         const struct x86_cpu_id *id;
         struct cpu_defaults *cpu_info;
-        u64 units;
 
         if (no_load)
                 return -ENODEV;
@@ -960,9 +954,6 @@ static int __init intel_pstate_init(void)
         if (rc)
                 goto out;
 
-        rdmsrl(MSR_RAPL_POWER_UNIT, units);
-        energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
         intel_pstate_debug_expose_params();
         intel_pstate_sysfs_expose_params();
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
         tristate "MOXART DMA support"
         depends on ARCH_MOXART
         select DMA_ENGINE
+        select DMA_OF
         select DMA_VIRTUAL_CHANNELS
         help
           Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
         if (!mv_can_chain(grp_start))
                 goto submit_done;
 
-        dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
-                old_chain_tail->async_tx.phys);
+        dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+                &old_chain_tail->async_tx.phys);
 
         /* fix up the hardware chain */
         mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
 /* returns the number of allocated descriptors */
 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
-        char *hw_desc;
+        void *virt_desc;
+        dma_addr_t dma_desc;
         int idx;
         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
         struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
                                 " %d descriptor slots", idx);
                         break;
                 }
-                hw_desc = (char *) mv_chan->dma_desc_pool_virt;
-                slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+                virt_desc = mv_chan->dma_desc_pool_virt;
+                slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
 
                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
                 slot->async_tx.tx_submit = mv_xor_tx_submit;
                 INIT_LIST_HEAD(&slot->chain_node);
                 INIT_LIST_HEAD(&slot->slot_node);
                 INIT_LIST_HEAD(&slot->tx_list);
-                hw_desc = (char *) mv_chan->dma_desc_pool;
-                slot->async_tx.phys =
-                        (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+                dma_desc = mv_chan->dma_desc_pool;
+                slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
                 slot->idx = idx++;
 
                 spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         int slot_cnt;
 
         dev_dbg(mv_chan_to_devp(mv_chan),
-                "%s dest: %x src %x len: %u flags: %ld\n",
-                __func__, dest, src, len, flags);
+                "%s dest: %pad src %pad len: %u flags: %ld\n",
+                __func__, &dest, &src, len, flags);
         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                 return NULL;
 
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
         dev_dbg(mv_chan_to_devp(mv_chan),
-                "%s src_cnt: %d len: dest %x %u flags: %ld\n",
-                __func__, src_cnt, len, dest, flags);
+                "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+                __func__, src_cnt, len, &dest, flags);
 
         spin_lock_bh(&mv_chan->lock);
         slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
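
On the format-string changes above: dma_addr_t can be wider than unsigned long (for example 64-bit DMA addresses on a 32-bit LPAE build), so printing it with %x is wrong. The kernel's %pa/%pad specifiers take a pointer to the value and print it at its real width. A tiny illustrative sketch:

    #include <linux/device.h>
    #include <linux/types.h>

    /* Sketch only: %pad expects the *address* of the dma_addr_t variable. */
    static void show_desc(struct device *dev, dma_addr_t desc)
    {
            dev_dbg(dev, "hw descriptor at %pad\n", &desc);
    }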
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
  *
  *      called with the mem_ctls_mutex held
  */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+                                bool init)
 {
         edac_dbg(0, "\n");
 
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
         if (mci->op_state != OP_RUNNING_POLL)
                 return;
 
-        INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+        if (init)
+                INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
         mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
  * user space has updated our poll period value, need to
  * reset our workq delays
 */
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
 {
         struct mem_ctl_info *mci;
         struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
         list_for_each(item, &mc_devices) {
                 mci = list_entry(item, struct mem_ctl_info, link);
 
-                edac_mc_workq_setup(mci, (unsigned long) value);
+                edac_mc_workq_setup(mci, value, false);
         }
 
         mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
                 /* This instance is NOW RUNNING */
                 mci->op_state = OP_RUNNING_POLL;
 
-                edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+                edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
         } else {
                 mci->op_state = OP_RUNNING_INTERRUPT;
         }
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 8ec1747b1c39..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
 
 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
 {
-        long l;
+        unsigned long l;
         int ret;
 
         if (!val)
                 return -EINVAL;
 
-        ret = kstrtol(val, 0, &l);
+        ret = kstrtoul(val, 0, &l);
         if (ret)
                 return ret;
-        if (!l || ((int)l != l))
+
+        if (l < 1000)
                 return -EINVAL;
-        *((int *)kp->arg) = l;
+
+        *((unsigned long *)kp->arg) = l;
 
         /* notify edac_mc engine to reset the poll period */
         edac_mc_reset_delay_period(l);
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
 extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
 extern void edac_device_reset_delay_period(struct edac_device_ctl_info
                                            *edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
 
 extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
 
 config GPIO_TB10X
         bool
+        select GENERIC_IRQ_CHIP
         select OF_GPIO
 
 comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
 
 module_platform_driver(bcm_kona_gpio_driver);
 
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
 MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
 MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
 
 static int intel_gpio_runtime_idle(struct device *dev)
 {
-        pm_schedule_suspend(dev, 500);
-        return -EBUSY;
+        int err = pm_schedule_suspend(dev, 500);
+        return err ?: -EBUSY;
 }
 
 static const struct dev_pm_ops intel_gpio_pm_ops = {
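
`err ?: -EBUSY` uses the GCC conditional-with-omitted-middle extension: it evaluates to err when err is non-zero and to -EBUSY otherwise, so a failed pm_schedule_suspend() is propagated instead of being silently replaced. Written out (illustrative):

    /* Sketch: what "return err ?: -EBUSY;" expands to. */
    static int runtime_idle_result(int err)
    {
            if (err)
                    return err;     /* propagate the scheduling failure */
            return -EBUSY;          /* suspend scheduled; don't idle-suspend now */
    }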
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
 #error GPIO32 option is not enabled for your xtensa core variant
 #endif
 
+#if XCHAL_HAVE_CP
+
 static inline unsigned long enable_cp(unsigned long *cpenable)
 {
         unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
         local_irq_restore(flags);
 }
 
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+        *cpenable = 0; /* avoid uninitialized value warning */
+        return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
 static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
 {
         return 1; /* input only */
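
The change above follows the usual compile-time stub pattern: when XCHAL_HAVE_CP is not set, enable_cp()/disable_cp() collapse to empty static inlines, so callers stay free of #ifdefs and the compiler drops the stubs entirely. In generic form (illustrative, not driver-specific):

    #ifdef CONFIG_HAVE_WIDGET
    static inline void widget_enable(void)  { /* touch the real hardware */ }
    static inline void widget_disable(void) { /* ... */ }
    #else
    /* Stubs: callers need no #ifdefs; empty inlines vanish at compile time. */
    static inline void widget_enable(void)  { }
    static inline void widget_disable(void) { }
    #endif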
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
 
 config DRM_EXYNOS_IPP
         bool "Exynos DRM IPP"
-        depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+        depends on DRM_EXYNOS
         help
           Choose this option if you want to use IPP feature for DRM.
 
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
 
 config DRM_EXYNOS_GSC
         bool "Exynos DRM GSC"
-        depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+        depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
         help
           Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
         file->driver_priv = file_priv;
 
         ret = exynos_drm_subdrv_open(dev, file);
-        if (ret) {
-                kfree(file_priv);
-                file->driver_priv = NULL;
-        }
+        if (ret)
+                goto out;
 
         anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
                                         NULL, 0);
         if (IS_ERR(anon_filp)) {
-                kfree(file_priv);
-                return PTR_ERR(anon_filp);
+                ret = PTR_ERR(anon_filp);
+                goto out;
         }
 
         anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
         file_priv->anon_filp = anon_filp;
 
         return ret;
+out:
+        kfree(file_priv);
+        file->driver_priv = NULL;
+        return ret;
 }
 
 static void exynos_drm_preclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
                 reg_type = REG_TYPE_NONE;
                 DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
                 break;
-        };
+        }
 
         return reg_type;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
 #include <linux/types.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>
-#include <plat/map-base.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
                 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
 
                 /*
-                 * quf == NULL condition means all event deletion.
+                 * qbuf == NULL condition means all event deletion.
                  * stop operations want to delete all event list.
                  * another case delete only same buf id.
                  */
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/hdmi.h>
 
 #include <drm/exynos_drm.h>
 
@@ -59,19 +60,6 @@
 #define HDMI_AUI_VERSION        0x01
 #define HDMI_AUI_LENGTH         0x0A
 
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
-        /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
-        /* InfoFrame packet type */
-        HDMI_PACKET_TYPE_INFOFRAME = 0x80,
-        /* Vendor-Specific InfoFrame */
-        HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
-        /* Auxiliary Video information InfoFrame */
-        HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
-        /* Audio information InfoFrame */
-        HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
 enum hdmi_type {
         HDMI_TYPE13,
         HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
         },
 };
 
-struct hdmi_infoframe {
-        enum HDMI_PACKET_TYPE type;
-        u8 ver;
-        u8 len;
-};
-
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
         return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
 }
 
 static void hdmi_reg_infoframe(struct hdmi_context *hdata,
-                        struct hdmi_infoframe *infoframe)
+                        union hdmi_infoframe *infoframe)
 {
         u32 hdr_sum;
         u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
                 return;
         }
 
-        switch (infoframe->type) {
-        case HDMI_PACKET_TYPE_AVI:
+        switch (infoframe->any.type) {
+        case HDMI_INFOFRAME_TYPE_AVI:
                 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
-                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
-                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
-                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
-                hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+                                infoframe->any.version);
+                hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+                hdr_sum = infoframe->any.type + infoframe->any.version +
+                          infoframe->any.length;
 
                 /* Output format zero hardcoded ,RGB YBCR selection */
                 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
                 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
 
                 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
-                                infoframe->len, hdr_sum);
+                                infoframe->any.length, hdr_sum);
                 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
                 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
                 break;
-        case HDMI_PACKET_TYPE_AUI:
+        case HDMI_INFOFRAME_TYPE_AUDIO:
                 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
-                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
-                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
-                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
-                hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+                                infoframe->any.version);
+                hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+                hdr_sum = infoframe->any.type + infoframe->any.version +
+                          infoframe->any.length;
                 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
-                                infoframe->len, hdr_sum);
+                                infoframe->any.length, hdr_sum);
                 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
                 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
                 break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
-        struct hdmi_infoframe infoframe;
+        union hdmi_infoframe infoframe;
 
         /* disable HPD interrupts from HDMI IP block, use GPIO instead */
         hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
                 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
                 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
         } else {
-                infoframe.type = HDMI_PACKET_TYPE_AVI;
-                infoframe.ver = HDMI_AVI_VERSION;
+                infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+                infoframe.any.version = HDMI_AVI_VERSION;
1026 | infoframe.len = HDMI_AVI_LENGTH; | 1012 | infoframe.any.length = HDMI_AVI_LENGTH; |
1027 | hdmi_reg_infoframe(hdata, &infoframe); | 1013 | hdmi_reg_infoframe(hdata, &infoframe); |
1028 | 1014 | ||
1029 | infoframe.type = HDMI_PACKET_TYPE_AUI; | 1015 | infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO; |
1030 | infoframe.ver = HDMI_AUI_VERSION; | 1016 | infoframe.any.version = HDMI_AUI_VERSION; |
1031 | infoframe.len = HDMI_AUI_LENGTH; | 1017 | infoframe.any.length = HDMI_AUI_LENGTH; |
1032 | hdmi_reg_infoframe(hdata, &infoframe); | 1018 | hdmi_reg_infoframe(hdata, &infoframe); |
1033 | 1019 | ||
1034 | /* enable AVI packet every vsync, fixes purple line problem */ | 1020 | /* enable AVI packet every vsync, fixes purple line problem */ |
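
The exynos_hdmi.c hunks above drop the driver-private struct hdmi_infoframe and HDMI_PACKET_TYPE enum in favour of the kernel-wide union hdmi_infoframe from <linux/hdmi.h>, addressing the packet header through the common .any view (type/version/length) and the HDMI_INFOFRAME_TYPE_* constants. Below is a minimal userspace sketch of that header layout; the types are a simplified mirror of the <linux/hdmi.h> definitions, not the full kernel ones, and the AVI version/length used in main() are the usual InfoFrame v2 values chosen only for illustration.

#include <stdio.h>

/* Simplified mirror of the <linux/hdmi.h> types the new code relies on;
 * only the fields touched in hdmi_reg_infoframe() are modelled here. */
enum hdmi_infoframe_type {
	HDMI_INFOFRAME_TYPE_AVI   = 0x82,
	HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
};

struct hdmi_any_infoframe {
	enum hdmi_infoframe_type type;
	unsigned char version;
	unsigned char length;
};

union hdmi_infoframe {
	struct hdmi_any_infoframe any;
	/* the real union also carries .avi, .audio, .spd and .vendor views */
};

static unsigned int header_sum(const union hdmi_infoframe *f)
{
	/* matches the hdr_sum computation in the patched driver */
	return f->any.type + f->any.version + f->any.length;
}

int main(void)
{
	union hdmi_infoframe avi = {
		.any = { .type = HDMI_INFOFRAME_TYPE_AVI, .version = 2, .length = 13 },
	};

	printf("AVI header sum: 0x%x\n", header_sum(&avi));
	return 0;
}
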
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 400b0c4a10fb..fa18cf374470 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -208,7 +208,7 @@ struct tda998x_priv { | |||
208 | # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) | 208 | # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) |
209 | # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) | 209 | # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) |
210 | #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ | 210 | #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ |
211 | # define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0) | 211 | # define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0) |
212 | # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) | 212 | # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) |
213 | #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ | 213 | #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ |
214 | # define PLL_SERIAL_3_SRL_CCIR (1 << 0) | 214 | # define PLL_SERIAL_3_SRL_CCIR (1 << 0) |
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) | |||
528 | { | 528 | { |
529 | uint8_t buf[PB(5) + 1]; | 529 | uint8_t buf[PB(5) + 1]; |
530 | 530 | ||
531 | memset(buf, 0, sizeof(buf)); | ||
531 | buf[HB(0)] = 0x84; | 532 | buf[HB(0)] = 0x84; |
532 | buf[HB(1)] = 0x01; | 533 | buf[HB(1)] = 0x01; |
533 | buf[HB(2)] = 10; | 534 | buf[HB(2)] = 10; |
534 | buf[PB(0)] = 0; | ||
535 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ | 535 | buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ |
536 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ | 536 | buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ |
537 | buf[PB(4)] = p->audio_frame[4]; | 537 | buf[PB(4)] = p->audio_frame[4]; |
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, | |||
824 | } | 824 | } |
825 | 825 | ||
826 | div = 148500 / mode->clock; | 826 | div = 148500 / mode->clock; |
827 | if (div != 0) { | ||
828 | div--; | ||
829 | if (div > 3) | ||
830 | div = 3; | ||
831 | } | ||
827 | 832 | ||
828 | /* mute the audio FIFO: */ | 833 | /* mute the audio FIFO: */ |
829 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); | 834 | reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); |
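
The clamp added in tda998x_encoder_mode_set() keeps the value written through PLL_SERIAL_2_SRL_NOSC(), whose &3 mask was dropped above, within the field's 0..3 range. A standalone sketch of that computation follows; mode->clock is in kHz as DRM mode clocks are, and the clock values in main() are only illustrative.

#include <stdio.h>

/* Mirror of the divider computation added in the hunk above: the driver
 * programs div-1 and never lets the result exceed the 2-bit SRL_NOSC field. */
static unsigned int tmds_post_div(unsigned int clock_khz)
{
	unsigned int div = 148500 / clock_khz;

	if (div != 0) {
		div--;            /* the register takes the value minus one */
		if (div > 3)
			div = 3;  /* stay inside the 2-bit field */
	}
	return div;
}

int main(void)
{
	const unsigned int clocks[] = { 25175, 74250, 148500 }; /* kHz, illustrative */

	for (unsigned int i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%u kHz -> div %u\n", clocks[i], tmds_post_div(clocks[i]));
	return 0;
}
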
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, | |||
913 | 918 | ||
914 | if (priv->rev == TDA19988) { | 919 | if (priv->rev == TDA19988) { |
915 | /* let incoming pixels fill the active space (if any) */ | 920 | /* let incoming pixels fill the active space (if any) */ |
916 | reg_write(encoder, REG_ENABLE_SPACE, 0x01); | 921 | reg_write(encoder, REG_ENABLE_SPACE, 0x00); |
917 | } | 922 | } |
918 | 923 | ||
919 | /* must be last register set: */ | 924 | /* must be last register set: */ |
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) | |||
1094 | { | 1099 | { |
1095 | struct tda998x_priv *priv = to_tda998x_priv(encoder); | 1100 | struct tda998x_priv *priv = to_tda998x_priv(encoder); |
1096 | drm_i2c_encoder_destroy(encoder); | 1101 | drm_i2c_encoder_destroy(encoder); |
1102 | if (priv->cec) | ||
1103 | i2c_unregister_device(priv->cec); | ||
1097 | kfree(priv); | 1104 | kfree(priv); |
1098 | } | 1105 | } |
1099 | 1106 | ||
@@ -1142,8 +1149,10 @@ tda998x_encoder_init(struct i2c_client *client, | |||
1142 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); | 1149 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); |
1143 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); | 1150 | priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); |
1144 | 1151 | ||
1145 | priv->current_page = 0; | 1152 | priv->current_page = 0xff; |
1146 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1153 | priv->cec = i2c_new_dummy(client->adapter, 0x34); |
1154 | if (!priv->cec) | ||
1155 | return -ENODEV; | ||
1147 | priv->dpms = DRM_MODE_DPMS_OFF; | 1156 | priv->dpms = DRM_MODE_DPMS_OFF; |
1148 | 1157 | ||
1149 | encoder_slave->slave_priv = priv; | 1158 | encoder_slave->slave_priv = priv; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4a2bf8e3f739..df77e20e3c3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private { | |||
1831 | 1831 | ||
1832 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ | 1832 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
1833 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) | 1833 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) |
1834 | /* | ||
1835 | * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts | ||
1836 | * even when in MSI mode. This results in spurious interrupt warnings if the | ||
1837 | * legacy irq no. is shared with another device. The kernel then disables that | ||
1838 | * interrupt source and so prevents the other device from working properly. | ||
1839 | */ | ||
1840 | #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
1841 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
1834 | 1842 | ||
1835 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 1843 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
1836 | * rows, which changed the alignment requirements and fence programming. | 1844 | * rows, which changed the alignment requirements and fence programming. |
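
The two new macros centralize the "gen >= 5" test that intel_i2c.c previously kept as a local HAS_GMBUS_IRQ (removed further down) and let intel_dp.c gate has_aux_irq on the same condition. A trivial sketch of that feature-gating-by-generation shape; the struct and field names here are stand-ins, not the i915 ones.

#include <stdio.h>

/* Illustrative stand-in for the pattern: derive feature availability from a
 * hardware generation number instead of open-coding the check at call sites. */
struct dev_info {
	int gen;
};

#define HAS_AUX_IRQ(info)   ((info)->gen >= 5)
#define HAS_GMBUS_IRQ(info) ((info)->gen >= 5)

int main(void)
{
	struct dev_info gen4 = { .gen = 4 }, gen7 = { .gen = 7 };

	printf("gen4 aux irq: %s\n", HAS_AUX_IRQ(&gen4) ? "yes" : "no");
	printf("gen7 aux irq: %s\n", HAS_AUX_IRQ(&gen7) ? "yes" : "no");
	return 0;
}
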
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d7fd2fd2f0a5..990cf8f43efd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, | |||
146 | va_list tmp; | 146 | va_list tmp; |
147 | 147 | ||
148 | va_copy(tmp, args); | 148 | va_copy(tmp, args); |
149 | if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) | 149 | len = vsnprintf(NULL, 0, f, tmp); |
150 | va_end(tmp); | ||
151 | |||
152 | if (!__i915_error_seek(e, len)) | ||
150 | return; | 153 | return; |
151 | } | 154 | } |
152 | 155 | ||
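
The i915_gpu_error.c change stores the vsnprintf(NULL, 0, ...) length in a local and balances the va_copy() with va_end() before the length is used, rather than consuming the copied list inside the __i915_error_seek() argument. A standalone sketch of that measure-then-format idiom; the helper names are invented for the example.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Measure-then-format: vsnprintf(NULL, 0, ...) reports the length the output
 * would need, and the copied va_list is released with va_end() before it is
 * used for anything else. */
static char *vstrdup_printf(const char *fmt, va_list args)
{
	va_list tmp;
	int len;
	char *buf;

	va_copy(tmp, args);
	len = vsnprintf(NULL, 0, fmt, tmp);
	va_end(tmp);                       /* balance the va_copy() first */

	if (len < 0)
		return NULL;

	buf = malloc(len + 1);
	if (buf)
		vsnprintf(buf, len + 1, fmt, args);
	return buf;
}

static char *strdup_printf(const char *fmt, ...)
{
	va_list args;
	char *s;

	va_start(args, fmt);
	s = vstrdup_printf(fmt, args);
	va_end(args);
	return s;
}

int main(void)
{
	char *s = strdup_printf("error ring %d, seqno %u", 2, 1234u);

	if (s) {
		puts(s);
		free(s);
	}
	return 0;
}
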
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 17d8fcb1b6f7..9fec71175571 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
567 | 567 | ||
568 | vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; | 568 | vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; |
569 | } else { | 569 | } else { |
570 | enum transcoder cpu_transcoder = | 570 | enum transcoder cpu_transcoder = (enum transcoder) pipe; |
571 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); | ||
572 | u32 htotal; | 571 | u32 htotal; |
573 | 572 | ||
574 | htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; | 573 | htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ede4e8e290d..2f517b85b3f4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
404 | int i, ret, recv_bytes; | 404 | int i, ret, recv_bytes; |
405 | uint32_t status; | 405 | uint32_t status; |
406 | int try, precharge, clock = 0; | 406 | int try, precharge, clock = 0; |
407 | bool has_aux_irq = true; | 407 | bool has_aux_irq = HAS_AUX_IRQ(dev); |
408 | uint32_t timeout; | 408 | uint32_t timeout; |
409 | 409 | ||
410 | /* dp aux is extremely sensitive to irq latency, hence request the | 410 | /* dp aux is extremely sensitive to irq latency, hence request the |
@@ -1869,10 +1869,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) | |||
1869 | 1869 | ||
1870 | mutex_unlock(&dev_priv->dpio_lock); | 1870 | mutex_unlock(&dev_priv->dpio_lock); |
1871 | 1871 | ||
1872 | /* init power sequencer on this pipe and port */ | 1872 | if (is_edp(intel_dp)) { |
1873 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); | 1873 | /* init power sequencer on this pipe and port */ |
1874 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, | 1874 | intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); |
1875 | &power_seq); | 1875 | intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, |
1876 | &power_seq); | ||
1877 | } | ||
1876 | 1878 | ||
1877 | intel_enable_dp(encoder); | 1879 | intel_enable_dp(encoder); |
1878 | 1880 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b1dc33f47899..d33b61d0dd33 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) | |||
258 | algo->data = bus; | 258 | algo->data = bus; |
259 | } | 259 | } |
260 | 260 | ||
261 | /* | ||
262 | * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI | ||
263 | * mode. This results in spurious interrupt warnings if the legacy irq no. is | ||
264 | * shared with another device. The kernel then disables that interrupt source | ||
265 | * and so prevents the other device from working properly. | ||
266 | */ | ||
267 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
268 | static int | 261 | static int |
269 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | 262 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, |
270 | u32 gmbus2_status, | 263 | u32 gmbus2_status, |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 4e960ec7419f..acde2945eb8a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -226,6 +226,8 @@ struct opregion_asle { | |||
226 | #define ACPI_DIGITAL_OUTPUT (3<<8) | 226 | #define ACPI_DIGITAL_OUTPUT (3<<8) |
227 | #define ACPI_LVDS_OUTPUT (4<<8) | 227 | #define ACPI_LVDS_OUTPUT (4<<8) |
228 | 228 | ||
229 | #define MAX_DSLP 1500 | ||
230 | |||
229 | #ifdef CONFIG_ACPI | 231 | #ifdef CONFIG_ACPI |
230 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | 232 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) |
231 | { | 233 | { |
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
260 | /* The spec says 2ms should be the default, but it's too small | 262 | /* The spec says 2ms should be the default, but it's too small |
261 | * for some machines. */ | 263 | * for some machines. */ |
262 | dslp = 50; | 264 | dslp = 50; |
263 | } else if (dslp > 500) { | 265 | } else if (dslp > MAX_DSLP) { |
264 | /* Hey bios, trust must be earned. */ | 266 | /* Hey bios, trust must be earned. */ |
265 | WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); | 267 | DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, " |
266 | dslp = 500; | 268 | "using %u ms instead\n", dslp, MAX_DSLP); |
269 | dslp = MAX_DSLP; | ||
267 | } | 270 | } |
268 | 271 | ||
269 | /* The spec tells us to do this, but we are the only user... */ | 272 | /* The spec tells us to do this, but we are the only user... */ |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1964f4f0d452..84c5b13b33c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | |||
@@ -39,6 +39,7 @@ struct mdp4_crtc { | |||
39 | spinlock_t lock; | 39 | spinlock_t lock; |
40 | bool stale; | 40 | bool stale; |
41 | uint32_t width, height; | 41 | uint32_t width, height; |
42 | uint32_t x, y; | ||
42 | 43 | ||
43 | /* next cursor to scan-out: */ | 44 | /* next cursor to scan-out: */ |
44 | uint32_t next_iova; | 45 | uint32_t next_iova; |
@@ -57,9 +58,16 @@ struct mdp4_crtc { | |||
57 | #define PENDING_FLIP 0x2 | 58 | #define PENDING_FLIP 0x2 |
58 | atomic_t pending; | 59 | atomic_t pending; |
59 | 60 | ||
60 | /* the fb that we currently hold a scanout ref to: */ | 61 | /* the fb that we logically (from PoV of KMS API) hold a ref |
62 | * to. Which we may not yet be scanning out (we may still | ||
63 | * be scanning out previous in case of page_flip while waiting | ||
64 | * for gpu rendering to complete: | ||
65 | */ | ||
61 | struct drm_framebuffer *fb; | 66 | struct drm_framebuffer *fb; |
62 | 67 | ||
68 | /* the fb that we currently hold a scanout ref to: */ | ||
69 | struct drm_framebuffer *scanout_fb; | ||
70 | |||
63 | /* for unref'ing framebuffers after scanout completes: */ | 71 | /* for unref'ing framebuffers after scanout completes: */ |
64 | struct drm_flip_work unref_fb_work; | 72 | struct drm_flip_work unref_fb_work; |
65 | 73 | ||
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc) | |||
77 | return to_mdp4_kms(to_mdp_kms(priv->kms)); | 85 | return to_mdp4_kms(to_mdp_kms(priv->kms)); |
78 | } | 86 | } |
79 | 87 | ||
80 | static void update_fb(struct drm_crtc *crtc, bool async, | 88 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) |
81 | struct drm_framebuffer *new_fb) | ||
82 | { | 89 | { |
83 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 90 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
84 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
85 | 91 | ||
86 | if (old_fb) | 92 | atomic_or(pending, &mdp4_crtc->pending); |
87 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); | 93 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); |
94 | } | ||
95 | |||
96 | static void crtc_flush(struct drm_crtc *crtc) | ||
97 | { | ||
98 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
99 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
100 | uint32_t i, flush = 0; | ||
101 | |||
102 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
103 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
104 | if (plane) { | ||
105 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
106 | flush |= pipe2flush(pipe_id); | ||
107 | } | ||
108 | } | ||
109 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
110 | |||
111 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
112 | |||
113 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
114 | } | ||
115 | |||
116 | static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) | ||
117 | { | ||
118 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
119 | struct drm_framebuffer *old_fb = mdp4_crtc->fb; | ||
88 | 120 | ||
89 | /* grab reference to incoming scanout fb: */ | 121 | /* grab reference to incoming scanout fb: */ |
90 | drm_framebuffer_reference(new_fb); | 122 | drm_framebuffer_reference(new_fb); |
91 | mdp4_crtc->base.fb = new_fb; | 123 | mdp4_crtc->base.fb = new_fb; |
92 | mdp4_crtc->fb = new_fb; | 124 | mdp4_crtc->fb = new_fb; |
93 | 125 | ||
94 | if (!async) { | 126 | if (old_fb) |
95 | /* enable vblank to pick up the old_fb */ | 127 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); |
96 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | 128 | } |
97 | } | 129 | |
130 | /* unlike update_fb(), take a ref to the new scanout fb *before* updating | ||
131 | * plane, then call this. Needed to ensure we don't unref the buffer that | ||
132 | * is actually still being scanned out. | ||
133 | * | ||
134 | * Note that this whole thing goes away with atomic.. since we can defer | ||
135 | * calling into driver until rendering is done. | ||
136 | */ | ||
137 | static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) | ||
138 | { | ||
139 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
140 | |||
141 | /* flush updates, to make sure hw is updated to new scanout fb, | ||
142 | * so that we can safely queue unref to current fb (ie. next | ||
143 | * vblank we know hw is done w/ previous scanout_fb). | ||
144 | */ | ||
145 | crtc_flush(crtc); | ||
146 | |||
147 | if (mdp4_crtc->scanout_fb) | ||
148 | drm_flip_work_queue(&mdp4_crtc->unref_fb_work, | ||
149 | mdp4_crtc->scanout_fb); | ||
150 | |||
151 | mdp4_crtc->scanout_fb = fb; | ||
152 | |||
153 | /* enable vblank to complete flip: */ | ||
154 | request_pending(crtc, PENDING_FLIP); | ||
98 | } | 155 | } |
99 | 156 | ||
100 | /* if file!=NULL, this is preclose potential cancel-flip path */ | 157 | /* if file!=NULL, this is preclose potential cancel-flip path */ |
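
The refactor above splits the fb the KMS API logically holds (update_fb) from the fb the hardware is still scanning out (update_scanout), and always takes the reference on the incoming buffer before queueing the outgoing one for release. Below is a small userspace model of that two-slot scheme; the real driver uses drm_framebuffer references and drm_flip_work, with the actual unref deferred to the vblank worker, and the refcount bookkeeping here is only illustrative.

#include <stdio.h>

/* Two-slot model: "fb" is what the KMS API believes is current, "scanout_fb"
 * is what the hardware is still reading; old buffers are only queued for
 * release after the new one already holds a reference. */
struct buffer {
	const char *name;
	int refs;
};

static void get_buf(struct buffer *b) { b->refs++; }

static void queue_unref(struct buffer *b)
{
	/* stand-in for drm_flip_work_queue(): real release happens at vblank */
	printf("queued unref of %s (refs now %d)\n", b->name, --b->refs);
}

static struct buffer *fb, *scanout_fb;

static void update_fb(struct buffer *new_fb)
{
	struct buffer *old = fb;

	get_buf(new_fb);            /* grab the incoming fb first */
	fb = new_fb;
	if (old)
		queue_unref(old);
}

static void update_scanout(struct buffer *new_fb)
{
	/* a hardware flush (crtc_flush() in the driver) would go here */
	if (scanout_fb)
		queue_unref(scanout_fb);
	scanout_fb = new_fb;
}

int main(void)
{
	struct buffer a = { "fb A", 0 }, b = { "fb B", 0 };

	/* modeset path: both slots end up referencing A */
	update_fb(&a);
	get_buf(&a);
	update_scanout(&a);

	/* flip to B: A's references drop only through the deferred queue */
	update_fb(&b);
	get_buf(&b);
	update_scanout(&b);

	printf("final: A refs=%d, B refs=%d\n", a.refs, b.refs);
	return 0;
}
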
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) | |||
120 | spin_unlock_irqrestore(&dev->event_lock, flags); | 177 | spin_unlock_irqrestore(&dev->event_lock, flags); |
121 | } | 178 | } |
122 | 179 | ||
123 | static void crtc_flush(struct drm_crtc *crtc) | ||
124 | { | ||
125 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
126 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
127 | uint32_t i, flush = 0; | ||
128 | |||
129 | for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { | ||
130 | struct drm_plane *plane = mdp4_crtc->planes[i]; | ||
131 | if (plane) { | ||
132 | enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); | ||
133 | flush |= pipe2flush(pipe_id); | ||
134 | } | ||
135 | } | ||
136 | flush |= ovlp2flush(mdp4_crtc->ovlp); | ||
137 | |||
138 | DBG("%s: flush=%08x", mdp4_crtc->name, flush); | ||
139 | |||
140 | mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); | ||
141 | } | ||
142 | |||
143 | static void request_pending(struct drm_crtc *crtc, uint32_t pending) | ||
144 | { | ||
145 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | ||
146 | |||
147 | atomic_or(pending, &mdp4_crtc->pending); | ||
148 | mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); | ||
149 | } | ||
150 | |||
151 | static void pageflip_cb(struct msm_fence_cb *cb) | 180 | static void pageflip_cb(struct msm_fence_cb *cb) |
152 | { | 181 | { |
153 | struct mdp4_crtc *mdp4_crtc = | 182 | struct mdp4_crtc *mdp4_crtc = |
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb) | |||
158 | if (!fb) | 187 | if (!fb) |
159 | return; | 188 | return; |
160 | 189 | ||
190 | drm_framebuffer_reference(fb); | ||
161 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); | 191 | mdp4_plane_set_scanout(mdp4_crtc->plane, fb); |
162 | crtc_flush(crtc); | 192 | update_scanout(crtc, fb); |
163 | |||
164 | /* enable vblank to complete flip: */ | ||
165 | request_pending(crtc, PENDING_FLIP); | ||
166 | } | 193 | } |
167 | 194 | ||
168 | static void unref_fb_worker(struct drm_flip_work *work, void *val) | 195 | static void unref_fb_worker(struct drm_flip_work *work, void *val) |
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
320 | mode->vsync_end, mode->vtotal, | 347 | mode->vsync_end, mode->vtotal, |
321 | mode->type, mode->flags); | 348 | mode->type, mode->flags); |
322 | 349 | ||
350 | /* grab extra ref for update_scanout() */ | ||
351 | drm_framebuffer_reference(crtc->fb); | ||
352 | |||
353 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
354 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
355 | x << 16, y << 16, | ||
356 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
357 | if (ret) { | ||
358 | drm_framebuffer_unreference(crtc->fb); | ||
359 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
360 | mdp4_crtc->name, ret); | ||
361 | return ret; | ||
362 | } | ||
363 | |||
323 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), | 364 | mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), |
324 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | | 365 | MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | |
325 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); | 366 | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); |
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, | |||
341 | 382 | ||
342 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); | 383 | mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); |
343 | 384 | ||
344 | update_fb(crtc, false, crtc->fb); | ||
345 | |||
346 | ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, | ||
347 | 0, 0, mode->hdisplay, mode->vdisplay, | ||
348 | x << 16, y << 16, | ||
349 | mode->hdisplay << 16, mode->vdisplay << 16); | ||
350 | if (ret) { | ||
351 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | ||
352 | mdp4_crtc->name, ret); | ||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | if (dma == DMA_E) { | 385 | if (dma == DMA_E) { |
357 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); | 386 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); |
358 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); | 387 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); |
359 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); | 388 | mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); |
360 | } | 389 | } |
361 | 390 | ||
391 | update_fb(crtc, crtc->fb); | ||
392 | update_scanout(crtc, crtc->fb); | ||
393 | |||
362 | return 0; | 394 | return 0; |
363 | } | 395 | } |
364 | 396 | ||
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
385 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 417 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
386 | struct drm_plane *plane = mdp4_crtc->plane; | 418 | struct drm_plane *plane = mdp4_crtc->plane; |
387 | struct drm_display_mode *mode = &crtc->mode; | 419 | struct drm_display_mode *mode = &crtc->mode; |
420 | int ret; | ||
388 | 421 | ||
389 | update_fb(crtc, false, crtc->fb); | 422 | /* grab extra ref for update_scanout() */ |
423 | drm_framebuffer_reference(crtc->fb); | ||
390 | 424 | ||
391 | return mdp4_plane_mode_set(plane, crtc, crtc->fb, | 425 | ret = mdp4_plane_mode_set(plane, crtc, crtc->fb, |
392 | 0, 0, mode->hdisplay, mode->vdisplay, | 426 | 0, 0, mode->hdisplay, mode->vdisplay, |
393 | x << 16, y << 16, | 427 | x << 16, y << 16, |
394 | mode->hdisplay << 16, mode->vdisplay << 16); | 428 | mode->hdisplay << 16, mode->vdisplay << 16); |
429 | if (ret) { | ||
430 | drm_framebuffer_unreference(crtc->fb); | ||
431 | return ret; | ||
432 | } | ||
433 | |||
434 | update_fb(crtc, crtc->fb); | ||
435 | update_scanout(crtc, crtc->fb); | ||
436 | |||
437 | return 0; | ||
395 | } | 438 | } |
396 | 439 | ||
397 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) | 440 | static void mdp4_crtc_load_lut(struct drm_crtc *crtc) |
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc, | |||
419 | mdp4_crtc->event = event; | 462 | mdp4_crtc->event = event; |
420 | spin_unlock_irqrestore(&dev->event_lock, flags); | 463 | spin_unlock_irqrestore(&dev->event_lock, flags); |
421 | 464 | ||
422 | update_fb(crtc, true, new_fb); | 465 | update_fb(crtc, new_fb); |
423 | 466 | ||
424 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); | 467 | return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); |
425 | } | 468 | } |
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc, | |||
442 | static void update_cursor(struct drm_crtc *crtc) | 485 | static void update_cursor(struct drm_crtc *crtc) |
443 | { | 486 | { |
444 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 487 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
488 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
445 | enum mdp4_dma dma = mdp4_crtc->dma; | 489 | enum mdp4_dma dma = mdp4_crtc->dma; |
446 | unsigned long flags; | 490 | unsigned long flags; |
447 | 491 | ||
448 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); | 492 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
449 | if (mdp4_crtc->cursor.stale) { | 493 | if (mdp4_crtc->cursor.stale) { |
450 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | ||
451 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; | 494 | struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; |
452 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; | 495 | struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; |
453 | uint32_t iova = mdp4_crtc->cursor.next_iova; | 496 | uint32_t iova = mdp4_crtc->cursor.next_iova; |
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc) | |||
479 | mdp4_crtc->cursor.scanout_bo = next_bo; | 522 | mdp4_crtc->cursor.scanout_bo = next_bo; |
480 | mdp4_crtc->cursor.stale = false; | 523 | mdp4_crtc->cursor.stale = false; |
481 | } | 524 | } |
525 | |||
526 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | ||
527 | MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | | ||
528 | MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); | ||
529 | |||
482 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | 530 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); |
483 | } | 531 | } |
484 | 532 | ||
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, | |||
530 | drm_gem_object_unreference_unlocked(old_bo); | 578 | drm_gem_object_unreference_unlocked(old_bo); |
531 | } | 579 | } |
532 | 580 | ||
581 | crtc_flush(crtc); | ||
533 | request_pending(crtc, PENDING_CURSOR); | 582 | request_pending(crtc, PENDING_CURSOR); |
534 | 583 | ||
535 | return 0; | 584 | return 0; |
@@ -542,12 +591,15 @@ fail: | |||
542 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | 591 | static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
543 | { | 592 | { |
544 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); | 593 | struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); |
545 | struct mdp4_kms *mdp4_kms = get_kms(crtc); | 594 | unsigned long flags; |
546 | enum mdp4_dma dma = mdp4_crtc->dma; | ||
547 | 595 | ||
548 | mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), | 596 | spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); |
549 | MDP4_DMA_CURSOR_POS_X(x) | | 597 | mdp4_crtc->cursor.x = x; |
550 | MDP4_DMA_CURSOR_POS_Y(y)); | 598 | mdp4_crtc->cursor.y = y; |
599 | spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); | ||
600 | |||
601 | crtc_flush(crtc); | ||
602 | request_pending(crtc, PENDING_CURSOR); | ||
551 | 603 | ||
552 | return 0; | 604 | return 0; |
553 | } | 605 | } |
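
After this hunk, mdp4_crtc_cursor_move() no longer writes REG_MDP4_DMA_CURSOR_POS directly; it records x/y under cursor.lock and lets update_cursor(), run from the flush path, program the position together with any pending cursor buffer swap. A standalone sketch of that deferred, lock-protected update; a pthread mutex stands in for the driver's spinlock and the names are generic.

#include <pthread.h>
#include <stdio.h>

/* Deferred cursor update: movement only records coordinates under a lock;
 * a later "flush" applies them together with any other pending state. */
static pthread_mutex_t cursor_lock = PTHREAD_MUTEX_INITIALIZER;
static int cursor_x, cursor_y;

static void cursor_move(int x, int y)
{
	pthread_mutex_lock(&cursor_lock);
	cursor_x = x;
	cursor_y = y;
	pthread_mutex_unlock(&cursor_lock);
	/* in the driver: crtc_flush() + request_pending(PENDING_CURSOR) */
}

static void update_cursor(void)
{
	pthread_mutex_lock(&cursor_lock);
	/* in the driver this is the REG_MDP4_DMA_CURSOR_POS register write */
	printf("program cursor position %d,%d\n", cursor_x, cursor_y);
	pthread_mutex_unlock(&cursor_lock);
}

int main(void)
{
	cursor_move(100, 200);
	cursor_move(110, 205);   /* only the latest position gets programmed */
	update_cursor();
	return 0;
}
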
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, | |||
713 | crtc = &mdp4_crtc->base; | 765 | crtc = &mdp4_crtc->base; |
714 | 766 | ||
715 | mdp4_crtc->plane = plane; | 767 | mdp4_crtc->plane = plane; |
768 | mdp4_crtc->id = id; | ||
716 | 769 | ||
717 | mdp4_crtc->ovlp = ovlp_id; | 770 | mdp4_crtc->ovlp = ovlp_id; |
718 | mdp4_crtc->dma = dma_id; | 771 | mdp4_crtc->dma = dma_id; |
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 2406027200ec..1e893dd13859 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | |||
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane, | |||
170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); | 170 | MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); |
171 | 171 | ||
172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), | 172 | mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), |
173 | MDP4_PIPE_SRC_XY_X(crtc_x) | | 173 | MDP4_PIPE_DST_XY_X(crtc_x) | |
174 | MDP4_PIPE_SRC_XY_Y(crtc_y)); | 174 | MDP4_PIPE_DST_XY_Y(crtc_y)); |
175 | 175 | ||
176 | mdp4_plane_set_scanout(plane, fb); | 176 | mdp4_plane_set_scanout(plane, fb); |
177 | 177 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 71a3b2345eb3..f2794021f086 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, | |||
296 | x << 16, y << 16, | 296 | x << 16, y << 16, |
297 | mode->hdisplay << 16, mode->vdisplay << 16); | 297 | mode->hdisplay << 16, mode->vdisplay << 16); |
298 | if (ret) { | 298 | if (ret) { |
299 | drm_framebuffer_unreference(crtc->fb); | ||
299 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", | 300 | dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", |
300 | mdp5_crtc->name, ret); | 301 | mdp5_crtc->name, ret); |
301 | return ret; | 302 | return ret; |
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
343 | 0, 0, mode->hdisplay, mode->vdisplay, | 344 | 0, 0, mode->hdisplay, mode->vdisplay, |
344 | x << 16, y << 16, | 345 | x << 16, y << 16, |
345 | mode->hdisplay << 16, mode->vdisplay << 16); | 346 | mode->hdisplay << 16, mode->vdisplay << 16); |
347 | if (ret) { | ||
348 | drm_framebuffer_unreference(crtc->fb); | ||
349 | return ret; | ||
350 | } | ||
346 | 351 | ||
347 | update_fb(crtc, crtc->fb); | 352 | update_fb(crtc, crtc->fb); |
348 | update_scanout(crtc, crtc->fb); | 353 | update_scanout(crtc, crtc->fb); |
349 | 354 | ||
350 | return ret; | 355 | return 0; |
351 | } | 356 | } |
352 | 357 | ||
353 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) | 358 | static void mdp5_crtc_load_lut(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8d60c969ac7..3da8264d3039 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, | |||
644 | 644 | ||
645 | fail: | 645 | fail: |
646 | if (obj) | 646 | if (obj) |
647 | drm_gem_object_unreference_unlocked(obj); | 647 | drm_gem_object_unreference(obj); |
648 | 648 | ||
649 | return ERR_PTR(ret); | 649 | return ERR_PTR(ret); |
650 | } | 650 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5281d4bc37f7..5423e914e491 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -163,7 +163,7 @@ retry: | |||
163 | 163 | ||
164 | 164 | ||
165 | /* if locking succeeded, pin bo: */ | 165 | /* if locking succeeded, pin bo: */ |
166 | ret = msm_gem_get_iova(&msm_obj->base, | 166 | ret = msm_gem_get_iova_locked(&msm_obj->base, |
167 | submit->gpu->id, &iova); | 167 | submit->gpu->id, &iova); |
168 | 168 | ||
169 | /* this would break the logic in the fail path.. there is no | 169 | /* this would break the logic in the fail path.. there is no |
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob | |||
247 | /* For now, just map the entire thing. Eventually we probably | 247 | /* For now, just map the entire thing. Eventually we probably |
248 | * to do it page-by-page, w/ kmap() if not vmap()d.. | 248 | * to do it page-by-page, w/ kmap() if not vmap()d.. |
249 | */ | 249 | */ |
250 | ptr = msm_gem_vaddr(&obj->base); | 250 | ptr = msm_gem_vaddr_locked(&obj->base); |
251 | 251 | ||
252 | if (IS_ERR(ptr)) { | 252 | if (IS_ERR(ptr)) { |
253 | ret = PTR_ERR(ptr); | 253 | ret = PTR_ERR(ptr); |
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) | |||
307 | { | 307 | { |
308 | unsigned i; | 308 | unsigned i; |
309 | 309 | ||
310 | mutex_lock(&submit->dev->struct_mutex); | ||
311 | for (i = 0; i < submit->nr_bos; i++) { | 310 | for (i = 0; i < submit->nr_bos; i++) { |
312 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 311 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
313 | submit_unlock_unpin_bo(submit, i); | 312 | submit_unlock_unpin_bo(submit, i); |
314 | list_del_init(&msm_obj->submit_entry); | 313 | list_del_init(&msm_obj->submit_entry); |
315 | drm_gem_object_unreference(&msm_obj->base); | 314 | drm_gem_object_unreference(&msm_obj->base); |
316 | } | 315 | } |
317 | mutex_unlock(&submit->dev->struct_mutex); | ||
318 | 316 | ||
319 | ww_acquire_fini(&submit->ticket); | 317 | ww_acquire_fini(&submit->ticket); |
320 | kfree(submit); | 318 | kfree(submit); |
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
342 | if (args->nr_cmds > MAX_CMDS) | 340 | if (args->nr_cmds > MAX_CMDS) |
343 | return -EINVAL; | 341 | return -EINVAL; |
344 | 342 | ||
343 | mutex_lock(&dev->struct_mutex); | ||
344 | |||
345 | submit = submit_create(dev, gpu, args->nr_bos); | 345 | submit = submit_create(dev, gpu, args->nr_bos); |
346 | if (!submit) { | 346 | if (!submit) { |
347 | ret = -ENOMEM; | 347 | ret = -ENOMEM; |
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
410 | out: | 410 | out: |
411 | if (submit) | 411 | if (submit) |
412 | submit_cleanup(submit, !!ret); | 412 | submit_cleanup(submit, !!ret); |
413 | mutex_unlock(&dev->struct_mutex); | ||
413 | return ret; | 414 | return ret; |
414 | } | 415 | } |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4ebce8be489d..0cfe3f426ee4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
298 | struct msm_drm_private *priv = dev->dev_private; | 298 | struct msm_drm_private *priv = dev->dev_private; |
299 | int i, ret; | 299 | int i, ret; |
300 | 300 | ||
301 | mutex_lock(&dev->struct_mutex); | ||
302 | |||
303 | submit->fence = ++priv->next_fence; | 301 | submit->fence = ++priv->next_fence; |
304 | 302 | ||
305 | gpu->submitted_fence = submit->fence; | 303 | gpu->submitted_fence = submit->fence; |
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
331 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); | 329 | msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); |
332 | } | 330 | } |
333 | hangcheck_timer_reset(gpu); | 331 | hangcheck_timer_reset(gpu); |
334 | mutex_unlock(&dev->struct_mutex); | ||
335 | 332 | ||
336 | return ret; | 333 | return ret; |
337 | } | 334 | } |
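
Taken together, the msm_gem_submit.c and msm_gpu.c hunks move struct_mutex out to msm_ioctl_gem_submit(), so the whole submit path runs with the lock held and uses the _locked accessors (msm_gem_get_iova_locked, msm_gem_vaddr_locked). A minimal sketch of that caller-holds-the-lock convention; the function names are generic placeholders, not the msm ones.

#include <pthread.h>
#include <stdio.h>

/* Caller-holds-the-lock convention: the ioctl-level entry point takes the
 * lock once, and the helpers it calls are "_locked" variants that assume it
 * is already held instead of taking it themselves. */
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int pin_buffer_locked(int handle)
{
	/* caller must hold dev_lock */
	printf("pin buffer %d (lock already held)\n", handle);
	return 0;
}

static int submit_locked(int nr_bos)
{
	for (int i = 0; i < nr_bos; i++)
		if (pin_buffer_locked(i))
			return -1;
	return 0;
}

static int submit_ioctl(int nr_bos)
{
	int ret;

	pthread_mutex_lock(&dev_lock);     /* one lock/unlock per ioctl */
	ret = submit_locked(nr_bos);
	pthread_mutex_unlock(&dev_lock);
	return ret;
}

int main(void)
{
	return submit_ioctl(3);
}
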
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e9..ea103ccdf4bd 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "cypress_dpm.h" | 29 | #include "cypress_dpm.h" |
30 | #include "btc_dpm.h" | 30 | #include "btc_dpm.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include <linux/seq_file.h> | ||
32 | 33 | ||
33 | #define MC_CG_ARB_FREQ_F0 0x0a | 34 | #define MC_CG_ARB_FREQ_F0 0x0a |
34 | #define MC_CG_ARB_FREQ_F1 0x0b | 35 | #define MC_CG_ARB_FREQ_F1 0x0b |
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev) | |||
2756 | r600_free_extended_power_table(rdev); | 2757 | r600_free_extended_power_table(rdev); |
2757 | } | 2758 | } |
2758 | 2759 | ||
2760 | void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
2761 | struct seq_file *m) | ||
2762 | { | ||
2763 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | ||
2764 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
2765 | struct rv7xx_ps *ps = rv770_get_ps(rps); | ||
2766 | struct rv7xx_pl *pl; | ||
2767 | u32 current_index = | ||
2768 | (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> | ||
2769 | CURRENT_PROFILE_INDEX_SHIFT; | ||
2770 | |||
2771 | if (current_index > 2) { | ||
2772 | seq_printf(m, "invalid dpm profile %d\n", current_index); | ||
2773 | } else { | ||
2774 | if (current_index == 0) | ||
2775 | pl = &ps->low; | ||
2776 | else if (current_index == 1) | ||
2777 | pl = &ps->medium; | ||
2778 | else /* current_index == 2 */ | ||
2779 | pl = &ps->high; | ||
2780 | seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
2781 | if (rdev->family >= CHIP_CEDAR) { | ||
2782 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", | ||
2783 | current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); | ||
2784 | } else { | ||
2785 | seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", | ||
2786 | current_index, pl->sclk, pl->mclk, pl->vddc); | ||
2787 | } | ||
2788 | } | ||
2789 | } | ||
2790 | |||
2759 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) | 2791 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) |
2760 | { | 2792 | { |
2761 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2793 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
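
The new btc_dpm_debugfs_print_current_performance_level() reads the profile index from TARGET_AND_CURRENT_PROFILE_INDEX and maps 0/1/2 onto the low/medium/high levels of the current power state. A standalone sketch of that index-to-level mapping and the debugfs-style output; the clock and voltage numbers below are invented, purely for illustration.

#include <stdio.h>

/* Index-to-level mapping used by the new debugfs helper: indices 0..2 select
 * the low/medium/high performance level; anything else is reported invalid. */
struct perf_level {
	unsigned int sclk, mclk, vddc;
};

struct power_state {
	struct perf_level low, medium, high;
};

static void print_current_level(const struct power_state *ps,
				unsigned int current_index)
{
	const struct perf_level *pl;

	if (current_index > 2) {
		printf("invalid dpm profile %u\n", current_index);
		return;
	}

	if (current_index == 0)
		pl = &ps->low;
	else if (current_index == 1)
		pl = &ps->medium;
	else
		pl = &ps->high;

	printf("power level %u sclk: %u mclk: %u vddc: %u\n",
	       current_index, pl->sclk, pl->mclk, pl->vddc);
}

int main(void)
{
	struct power_state ps = {
		.low    = { 30000, 15000, 900 },
		.medium = { 60000, 40000, 1000 },
		.high   = { 85000, 52500, 1100 },
	};

	print_current_level(&ps, 2);
	print_current_level(&ps, 7);
	return 0;
}
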
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h index 29e32de7e025..9c65be2d55a9 100644 --- a/drivers/gpu/drm/radeon/btcd.h +++ b/drivers/gpu/drm/radeon/btcd.h | |||
@@ -44,6 +44,10 @@ | |||
44 | # define DYN_SPREAD_SPECTRUM_EN (1 << 23) | 44 | # define DYN_SPREAD_SPECTRUM_EN (1 << 23) |
45 | # define AC_DC_SW (1 << 24) | 45 | # define AC_DC_SW (1 << 24) |
46 | 46 | ||
47 | #define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c | ||
48 | # define CURRENT_PROFILE_INDEX_MASK (0xf << 4) | ||
49 | # define CURRENT_PROFILE_INDEX_SHIFT 4 | ||
50 | |||
47 | #define CG_BIF_REQ_AND_RSP 0x7f4 | 51 | #define CG_BIF_REQ_AND_RSP 0x7f4 |
48 | #define CG_CLIENT_REQ(x) ((x) << 0) | 52 | #define CG_CLIENT_REQ(x) ((x) << 0) |
49 | #define CG_CLIENT_REQ_MASK (0xff << 0) | 53 | #define CG_CLIENT_REQ_MASK (0xff << 0) |
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b6e01d5d2cce..351db361239d 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c | |||
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev) | |||
1223 | 1223 | ||
1224 | int kv_dpm_late_enable(struct radeon_device *rdev) | 1224 | int kv_dpm_late_enable(struct radeon_device *rdev) |
1225 | { | 1225 | { |
1226 | int ret; | 1226 | int ret = 0; |
1227 | 1227 | ||
1228 | if (rdev->irq.installed && | 1228 | if (rdev->irq.installed && |
1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { | 1229 | r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index c351226ecb31..1217fbcbdcca 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, | |||
3945 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | 3945 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
3946 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 3946 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
3947 | struct ni_ps *ps = ni_get_ps(rps); | 3947 | struct ni_ps *ps = ni_get_ps(rps); |
3948 | u16 vddc; | ||
3949 | struct rv7xx_pl *pl = &ps->performance_levels[index]; | 3948 | struct rv7xx_pl *pl = &ps->performance_levels[index]; |
3950 | 3949 | ||
3951 | ps->performance_level_count = index + 1; | 3950 | ps->performance_level_count = index + 1; |
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, | |||
3961 | 3960 | ||
3962 | /* patch up vddc if necessary */ | 3961 | /* patch up vddc if necessary */ |
3963 | if (pl->vddc == 0xff01) { | 3962 | if (pl->vddc == 0xff01) { |
3964 | if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) | 3963 | if (pi->max_vddc) |
3965 | pl->vddc = vddc; | 3964 | pl->vddc = pi->max_vddc; |
3966 | } | 3965 | } |
3967 | 3966 | ||
3968 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { | 3967 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { |
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev, | |||
4322 | void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 4321 | void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
4323 | struct seq_file *m) | 4322 | struct seq_file *m) |
4324 | { | 4323 | { |
4325 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 4324 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
4325 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
4326 | struct ni_ps *ps = ni_get_ps(rps); | 4326 | struct ni_ps *ps = ni_get_ps(rps); |
4327 | struct rv7xx_pl *pl; | 4327 | struct rv7xx_pl *pl; |
4328 | u32 current_index = | 4328 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56140b4e5bb2..cdbc4171fe73 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -3991,6 +3991,10 @@ restart_ih: | |||
3991 | break; | 3991 | break; |
3992 | } | 3992 | } |
3993 | break; | 3993 | break; |
3994 | case 124: /* UVD */ | ||
3995 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | ||
3996 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | ||
3997 | break; | ||
3994 | case 176: /* CP_INT in ring buffer */ | 3998 | case 176: /* CP_INT in ring buffer */ |
3995 | case 177: /* CP_INT in IB1 */ | 3999 | case 177: /* CP_INT in IB1 */ |
3996 | case 178: /* CP_INT in IB2 */ | 4000 | case 178: /* CP_INT in IB2 */ |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f74db43346fd..dda02bfc10a4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = { | |||
1555 | .get_sclk = &btc_dpm_get_sclk, | 1555 | .get_sclk = &btc_dpm_get_sclk, |
1556 | .get_mclk = &btc_dpm_get_mclk, | 1556 | .get_mclk = &btc_dpm_get_mclk, |
1557 | .print_power_state = &rv770_dpm_print_power_state, | 1557 | .print_power_state = &rv770_dpm_print_power_state, |
1558 | .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, | 1558 | .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, |
1559 | .force_performance_level = &rv770_dpm_force_performance_level, | 1559 | .force_performance_level = &rv770_dpm_force_performance_level, |
1560 | .vblank_too_short = &btc_dpm_vblank_too_short, | 1560 | .vblank_too_short = &btc_dpm_vblank_too_short, |
1561 | }, | 1561 | }, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b3bc433eed4c..ae637cfda783 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev); | |||
551 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); | 551 | u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); |
552 | u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); | 552 | u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); |
553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); | 553 | bool btc_dpm_vblank_too_short(struct radeon_device *rdev); |
554 | void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | ||
555 | struct seq_file *m); | ||
554 | int sumo_dpm_init(struct radeon_device *rdev); | 556 | int sumo_dpm_init(struct radeon_device *rdev); |
555 | int sumo_dpm_enable(struct radeon_device *rdev); | 557 | int sumo_dpm_enable(struct radeon_device *rdev); |
556 | int sumo_dpm_late_enable(struct radeon_device *rdev); | 558 | int sumo_dpm_late_enable(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 80c595aba359..5b2ea8ac0731 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); | 2174 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
2175 | struct rv7xx_ps *ps = rv770_get_ps(rps); | 2175 | struct rv7xx_ps *ps = rv770_get_ps(rps); |
2176 | u32 sclk, mclk; | 2176 | u32 sclk, mclk; |
2177 | u16 vddc; | ||
2178 | struct rv7xx_pl *pl; | 2177 | struct rv7xx_pl *pl; |
2179 | 2178 | ||
2180 | switch (index) { | 2179 | switch (index) { |
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, | |||
2214 | 2213 | ||
2215 | /* patch up vddc if necessary */ | 2214 | /* patch up vddc if necessary */ |
2216 | if (pl->vddc == 0xff01) { | 2215 | if (pl->vddc == 0xff01) { |
2217 | if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) | 2216 | if (pi->max_vddc) |
2218 | pl->vddc = vddc; | 2217 | pl->vddc = pi->max_vddc; |
2219 | } | 2218 | } |
2220 | 2219 | ||
2221 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { | 2220 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 09ec4f6c53bb..83578324e5d1 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -6338,6 +6338,10 @@ restart_ih: | |||
6338 | break; | 6338 | break; |
6339 | } | 6339 | } |
6340 | break; | 6340 | break; |
6341 | case 124: /* UVD */ | ||
6342 | DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); | ||
6343 | radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); | ||
6344 | break; | ||
6341 | case 146: | 6345 | case 146: |
6342 | case 147: | 6346 | case 147: |
6343 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); | 6347 | addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0471501338fb..eafb0e6bc67e 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev) | |||
6472 | void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 6472 | void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
6473 | struct seq_file *m) | 6473 | struct seq_file *m) |
6474 | { | 6474 | { |
6475 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 6475 | struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
6476 | struct radeon_ps *rps = &eg_pi->current_rps; | ||
6476 | struct ni_ps *ps = ni_get_ps(rps); | 6477 | struct ni_ps *ps = ni_get_ps(rps); |
6477 | struct rv7xx_pl *pl; | 6478 | struct rv7xx_pl *pl; |
6478 | u32 current_index = | 6479 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f121efe12dc5..8b47b3cd0357 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c | |||
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev | |||
1807 | struct seq_file *m) | 1807 | struct seq_file *m) |
1808 | { | 1808 | { |
1809 | struct sumo_power_info *pi = sumo_get_pi(rdev); | 1809 | struct sumo_power_info *pi = sumo_get_pi(rdev); |
1810 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 1810 | struct radeon_ps *rps = &pi->current_rps; |
1811 | struct sumo_ps *ps = sumo_get_ps(rps); | 1811 | struct sumo_ps *ps = sumo_get_ps(rps); |
1812 | struct sumo_pl *pl; | 1812 | struct sumo_pl *pl; |
1813 | u32 current_index = | 1813 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2d447192d6f7..2da0e17eb960 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c | |||
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev, | |||
1926 | void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, | 1926 | void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
1927 | struct seq_file *m) | 1927 | struct seq_file *m) |
1928 | { | 1928 | { |
1929 | struct radeon_ps *rps = rdev->pm.dpm.current_ps; | 1929 | struct trinity_power_info *pi = trinity_get_pi(rdev); |
1930 | struct radeon_ps *rps = &pi->current_rps; | ||
1930 | struct trinity_ps *ps = trinity_get_ps(rps); | 1931 | struct trinity_ps *ps = trinity_get_ps(rps); |
1931 | struct trinity_pl *pl; | 1932 | struct trinity_pl *pl; |
1932 | u32 current_index = | 1933 | u32 current_index = |
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 824550db3fed..d1771004cb52 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c | |||
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, | |||
57 | radeon_ring_write(ring, 0); | 57 | radeon_ring_write(ring, 0); |
58 | radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); | 58 | radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
59 | radeon_ring_write(ring, 2); | 59 | radeon_ring_write(ring, 2); |
60 | return; | ||
61 | } | 60 | } |
62 | 61 | ||
63 | /** | 62 | /** |
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index af6edf9b1936..f2d7bf90c9fe 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
67 | int ret = 0; | 67 | int ret = 0; |
68 | struct vmbus_channel_initiate_contact *msg; | 68 | struct vmbus_channel_initiate_contact *msg; |
69 | unsigned long flags; | 69 | unsigned long flags; |
70 | int t; | ||
71 | 70 | ||
72 | init_completion(&msginfo->waitevent); | 71 | init_completion(&msginfo->waitevent); |
73 | 72 | ||
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
78 | msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); | 77 | msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); |
79 | msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); | 78 | msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); |
80 | msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); | 79 | msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); |
80 | if (version == VERSION_WIN8) | ||
81 | msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; | ||
81 | 82 | ||
82 | /* | 83 | /* |
83 | * Add to list before we send the request since we may | 84 | * Add to list before we send the request since we may |
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, | |||
100 | } | 101 | } |
101 | 102 | ||
102 | /* Wait for the connection response */ | 103 | /* Wait for the connection response */ |
103 | t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); | 104 | wait_for_completion(&msginfo->waitevent); |
104 | if (t == 0) { | ||
105 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, | ||
106 | flags); | ||
107 | list_del(&msginfo->msglistentry); | ||
108 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, | ||
109 | flags); | ||
110 | return -ETIMEDOUT; | ||
111 | } | ||
112 | 105 | ||
113 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 106 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
114 | list_del(&msginfo->msglistentry); | 107 | list_del(&msginfo->msglistentry); |
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8c23203915af..8a17f01e8672 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c | |||
@@ -145,7 +145,7 @@ struct ntc_data { | |||
145 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | 145 | static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) |
146 | { | 146 | { |
147 | struct iio_channel *channel = pdata->chan; | 147 | struct iio_channel *channel = pdata->chan; |
148 | unsigned int result; | 148 | s64 result; |
149 | int val, ret; | 149 | int val, ret; |
150 | 150 | ||
151 | ret = iio_read_channel_raw(channel, &val); | 151 | ret = iio_read_channel_raw(channel, &val); |
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) | |||
155 | } | 155 | } |
156 | 156 | ||
157 | /* unit: mV */ | 157 | /* unit: mV */ |
158 | result = pdata->pullup_uv * val; | 158 | result = pdata->pullup_uv * (s64) val; |
159 | result >>= 12; | 159 | result >>= 12; |
160 | 160 | ||
161 | return result; | 161 | return (int)result; |
162 | } | 162 | } |
163 | 163 | ||
164 | static const struct of_device_id ntc_match[] = { | 164 | static const struct of_device_id ntc_match[] = { |
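
The widening to s64 matters because pullup_uv, by its name the pull-up voltage in microvolts, multiplied by a 12-bit ADC code easily exceeds 2^31, so the old 32-bit product wrapped before the >> 12. A standalone sketch comparing the fixed computation with what a truncated product would have produced; the 3.3 V pull-up and the raw ADC code are illustrative values.

#include <stdint.h>
#include <stdio.h>

/* Widen before multiplying: the product of the pull-up voltage (microvolts)
 * and the raw ADC code does not fit in 32 bits for large readings. */
static int scale_adc(int64_t pullup_uv, int val)
{
	int64_t result = pullup_uv * (int64_t)val;   /* 64-bit product */

	result >>= 12;                               /* 12-bit ADC full scale */
	return (int)result;
}

int main(void)
{
	const int64_t pullup_uv = 3300000;  /* 3.3 V pull-up, illustrative */
	const int val = 4000;               /* raw ADC code near full scale */

	int64_t wide = pullup_uv * val;
	uint32_t wrapped = (uint32_t)wide;  /* what a 32-bit multiply would keep */

	printf("correct: %d\n", scale_adc(pullup_uv, val));
	printf("wrapped product would give: %u\n", (unsigned)(wrapped >> 12));
	return 0;
}
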
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index b8c5187b9ee0..d52d84937ad3 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -97,7 +97,6 @@ enum { | |||
97 | enum { | 97 | enum { |
98 | MV64XXX_I2C_ACTION_INVALID, | 98 | MV64XXX_I2C_ACTION_INVALID, |
99 | MV64XXX_I2C_ACTION_CONTINUE, | 99 | MV64XXX_I2C_ACTION_CONTINUE, |
100 | MV64XXX_I2C_ACTION_OFFLOAD_SEND_START, | ||
101 | MV64XXX_I2C_ACTION_SEND_START, | 100 | MV64XXX_I2C_ACTION_SEND_START, |
102 | MV64XXX_I2C_ACTION_SEND_RESTART, | 101 | MV64XXX_I2C_ACTION_SEND_RESTART, |
103 | MV64XXX_I2C_ACTION_OFFLOAD_RESTART, | 102 | MV64XXX_I2C_ACTION_OFFLOAD_RESTART, |
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data) | |||
204 | unsigned long ctrl_reg; | 203 | unsigned long ctrl_reg; |
205 | struct i2c_msg *msg = drv_data->msgs; | 204 | struct i2c_msg *msg = drv_data->msgs; |
206 | 205 | ||
206 | if (!drv_data->offload_enabled) | ||
207 | return -EOPNOTSUPP; | ||
208 | |||
207 | drv_data->msg = msg; | 209 | drv_data->msg = msg; |
208 | drv_data->byte_posn = 0; | 210 | drv_data->byte_posn = 0; |
209 | drv_data->bytes_left = msg->len; | 211 | drv_data->bytes_left = msg->len; |
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) | |||
433 | 435 | ||
434 | drv_data->msgs++; | 436 | drv_data->msgs++; |
435 | drv_data->num_msgs--; | 437 | drv_data->num_msgs--; |
436 | if (!(drv_data->offload_enabled && | 438 | if (mv64xxx_i2c_offload_msg(drv_data) < 0) { |
437 | mv64xxx_i2c_offload_msg(drv_data))) { | ||
438 | drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; | 439 | drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; |
439 | writel(drv_data->cntl_bits, | 440 | writel(drv_data->cntl_bits, |
440 | drv_data->reg_base + drv_data->reg_offsets.control); | 441 | drv_data->reg_base + drv_data->reg_offsets.control); |
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) | |||
458 | drv_data->reg_base + drv_data->reg_offsets.control); | 459 | drv_data->reg_base + drv_data->reg_offsets.control); |
459 | break; | 460 | break; |
460 | 461 | ||
461 | case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START: | ||
462 | if (!mv64xxx_i2c_offload_msg(drv_data)) | ||
463 | break; | ||
464 | else | ||
465 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; | ||
466 | /* FALLTHRU */ | ||
467 | case MV64XXX_I2C_ACTION_SEND_START: | 462 | case MV64XXX_I2C_ACTION_SEND_START: |
468 | writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, | 463 | /* Can we offload this msg? */ |
469 | drv_data->reg_base + drv_data->reg_offsets.control); | 464 | if (mv64xxx_i2c_offload_msg(drv_data) < 0) { |
465 | /* No, switch to standard path */ | ||
466 | mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs); | ||
467 | writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, | ||
468 | drv_data->reg_base + drv_data->reg_offsets.control); | ||
469 | } | ||
470 | break; | 470 | break; |
471 | 471 | ||
472 | case MV64XXX_I2C_ACTION_SEND_ADDR_1: | 472 | case MV64XXX_I2C_ACTION_SEND_ADDR_1: |
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg, | |||
625 | unsigned long flags; | 625 | unsigned long flags; |
626 | 626 | ||
627 | spin_lock_irqsave(&drv_data->lock, flags); | 627 | spin_lock_irqsave(&drv_data->lock, flags); |
628 | if (drv_data->offload_enabled) { | ||
629 | drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START; | ||
630 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; | ||
631 | } else { | ||
632 | mv64xxx_i2c_prepare_for_io(drv_data, msg); | ||
633 | 628 | ||
634 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; | 629 | drv_data->action = MV64XXX_I2C_ACTION_SEND_START; |
635 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; | 630 | drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; |
636 | } | 631 | |
637 | drv_data->send_stop = is_last; | 632 | drv_data->send_stop = is_last; |
638 | drv_data->block = 1; | 633 | drv_data->block = 1; |
639 | mv64xxx_i2c_do_action(drv_data); | 634 | mv64xxx_i2c_do_action(drv_data); |
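Taken together, the i2c-mv64xxx hunks above move the offload decision into mv64xxx_i2c_offload_msg() itself: it now refuses with -EOPNOTSUPP when offload is disabled, and every caller simply falls back to the standard PIO path on a negative return, which is what lets the OFFLOAD_SEND_START action disappear. A condensed paraphrase of how the pieces fit together (sketch, not the literal driver code):

	static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
	{
		if (!drv_data->offload_enabled)
			return -EOPNOTSUPP;	/* tell the caller to use PIO */
		/* ... program the offload registers for drv_data->msgs ... */
		return 0;
	}

	/* caller, e.g. the SEND_START action: */
	if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
		mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
		writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
		       drv_data->reg_base + drv_data->reg_offsets.control);
	}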
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 3bec9220df04..bfec313492b3 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c | |||
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { | |||
447 | { }, | 447 | { }, |
448 | }; | 448 | }; |
449 | 449 | ||
450 | #define BMA180_CHANNEL(_index) { \ | 450 | #define BMA180_CHANNEL(_axis) { \ |
451 | .type = IIO_ACCEL, \ | 451 | .type = IIO_ACCEL, \ |
452 | .indexed = 1, \ | 452 | .modified = 1, \ |
453 | .channel = (_index), \ | 453 | .channel2 = IIO_MOD_##_axis, \ |
454 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ | 454 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ |
455 | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ | 455 | BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \ |
456 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ | 456 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
457 | .scan_index = (_index), \ | 457 | .scan_index = AXIS_##_axis, \ |
458 | .scan_type = { \ | 458 | .scan_type = { \ |
459 | .sign = 's', \ | 459 | .sign = 's', \ |
460 | .realbits = 14, \ | 460 | .realbits = 14, \ |
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = { | |||
465 | } | 465 | } |
466 | 466 | ||
467 | static const struct iio_chan_spec bma180_channels[] = { | 467 | static const struct iio_chan_spec bma180_channels[] = { |
468 | BMA180_CHANNEL(AXIS_X), | 468 | BMA180_CHANNEL(X), |
469 | BMA180_CHANNEL(AXIS_Y), | 469 | BMA180_CHANNEL(Y), |
470 | BMA180_CHANNEL(AXIS_Z), | 470 | BMA180_CHANNEL(Z), |
471 | IIO_CHAN_SOFT_TIMESTAMP(4), | 471 | IIO_CHAN_SOFT_TIMESTAMP(3), |
472 | }; | 472 | }; |
473 | 473 | ||
474 | static irqreturn_t bma180_trigger_handler(int irq, void *p) | 474 | static irqreturn_t bma180_trigger_handler(int irq, void *p) |
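After this change the BMA180 channels are modified rather than indexed: userspace sees in_accel_x_raw / _y_ / _z_ instead of numbered channels, the scan index follows the driver's AXIS_* enum, and the soft timestamp therefore moves to scan slot 3 right after the three axes. Roughly what one expansion of the new macro produces (paraphrased sketch, assuming AXIS_X is 0 as in the driver's enum):

	/* BMA180_CHANNEL(X), paraphrased expansion */
	{
		.type       = IIO_ACCEL,
		.modified   = 1,		/* named by axis, not numbered */
		.channel2   = IIO_MOD_X,	/* sysfs name in_accel_x_raw   */
		.scan_index = AXIS_X,		/* 0; Y = 1, Z = 2, timestamp = 3 */
		/* info masks and scan_type as in the macro body above */
	},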
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index e283f2f2ee2f..360259266d4f 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c | |||
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client, | |||
1560 | st->client = client; | 1560 | st->client = client; |
1561 | 1561 | ||
1562 | st->vref_uv = st->chip_info->int_vref_mv * 1000; | 1562 | st->vref_uv = st->chip_info->int_vref_mv * 1000; |
1563 | vref = devm_regulator_get(&client->dev, "vref"); | 1563 | vref = devm_regulator_get_optional(&client->dev, "vref"); |
1564 | if (!IS_ERR(vref)) { | 1564 | if (!IS_ERR(vref)) { |
1565 | int vref_uv; | 1565 | int vref_uv; |
1566 | 1566 | ||
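The max1363 change switches to the _optional regulator getter, so a board with no "vref" supply gets an error pointer (typically -ENODEV) rather than a dummy regulator, and the existing IS_ERR() test cleanly falls back to the chip's internal reference. The usual shape of that pattern (sketch of the relevant part of probe, not the full path):

	vref = devm_regulator_get_optional(&client->dev, "vref");
	if (!IS_ERR(vref)) {
		ret = regulator_enable(vref);
		if (ret)
			return ret;
		ret = regulator_get_voltage(vref);	/* in microvolts */
		if (ret <= 0)
			return -EINVAL;
		st->vref_uv = ret;
	}
	/* else: keep the internal reference already stored in st->vref_uv */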
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 2f8f9d632386..0916bf6b6c31 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h | |||
@@ -189,6 +189,7 @@ enum { | |||
189 | ADIS16300_SCAN_INCLI_X, | 189 | ADIS16300_SCAN_INCLI_X, |
190 | ADIS16300_SCAN_INCLI_Y, | 190 | ADIS16300_SCAN_INCLI_Y, |
191 | ADIS16400_SCAN_ADC, | 191 | ADIS16400_SCAN_ADC, |
192 | ADIS16400_SCAN_TIMESTAMP, | ||
192 | }; | 193 | }; |
193 | 194 | ||
194 | #ifdef CONFIG_IIO_BUFFER | 195 | #ifdef CONFIG_IIO_BUFFER |
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 368660dfe135..7c582f7ae34e 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c | |||
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = { | |||
632 | ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), | 632 | ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), |
633 | ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), | 633 | ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), |
634 | ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), | 634 | ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), |
635 | IIO_CHAN_SOFT_TIMESTAMP(12) | 635 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
636 | }; | 636 | }; |
637 | 637 | ||
638 | static const struct iio_chan_spec adis16448_channels[] = { | 638 | static const struct iio_chan_spec adis16448_channels[] = { |
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = { | |||
659 | }, | 659 | }, |
660 | }, | 660 | }, |
661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), | 661 | ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), |
662 | IIO_CHAN_SOFT_TIMESTAMP(11) | 662 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
663 | }; | 663 | }; |
664 | 664 | ||
665 | static const struct iio_chan_spec adis16350_channels[] = { | 665 | static const struct iio_chan_spec adis16350_channels[] = { |
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = { | |||
677 | ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), | 677 | ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), |
678 | ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), | 678 | ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), |
679 | ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), | 679 | ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), |
680 | IIO_CHAN_SOFT_TIMESTAMP(11) | 680 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
681 | }; | 681 | }; |
682 | 682 | ||
683 | static const struct iio_chan_spec adis16300_channels[] = { | 683 | static const struct iio_chan_spec adis16300_channels[] = { |
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = { | |||
690 | ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), | 690 | ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), |
691 | ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), | 691 | ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), |
692 | ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), | 692 | ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), |
693 | IIO_CHAN_SOFT_TIMESTAMP(14) | 693 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
694 | }; | 694 | }; |
695 | 695 | ||
696 | static const struct iio_chan_spec adis16334_channels[] = { | 696 | static const struct iio_chan_spec adis16334_channels[] = { |
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = { | |||
701 | ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), | 701 | ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), |
702 | ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), | 702 | ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), |
703 | ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), | 703 | ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), |
704 | IIO_CHAN_SOFT_TIMESTAMP(8) | 704 | IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), |
705 | }; | 705 | }; |
706 | 706 | ||
707 | static struct attribute *adis16400_attributes[] = { | 707 | static struct attribute *adis16400_attributes[] = { |
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 3d8110157f2d..94daa9fc1247 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c | |||
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev, | |||
460 | { | 460 | { |
461 | struct tsl2563_chip *chip = iio_priv(indio_dev); | 461 | struct tsl2563_chip *chip = iio_priv(indio_dev); |
462 | 462 | ||
463 | if (chan->channel == IIO_MOD_LIGHT_BOTH) | 463 | if (mask != IIO_CHAN_INFO_CALIBSCALE) |
464 | return -EINVAL; | ||
465 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) | ||
464 | chip->calib0 = calib_from_sysfs(val); | 466 | chip->calib0 = calib_from_sysfs(val); |
465 | else | 467 | else if (chan->channel2 == IIO_MOD_LIGHT_IR) |
466 | chip->calib1 = calib_from_sysfs(val); | 468 | chip->calib1 = calib_from_sysfs(val); |
469 | else | ||
470 | return -EINVAL; | ||
467 | 471 | ||
468 | return 0; | 472 | return 0; |
469 | } | 473 | } |
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
472 | struct iio_chan_spec const *chan, | 476 | struct iio_chan_spec const *chan, |
473 | int *val, | 477 | int *val, |
474 | int *val2, | 478 | int *val2, |
475 | long m) | 479 | long mask) |
476 | { | 480 | { |
477 | int ret = -EINVAL; | 481 | int ret = -EINVAL; |
478 | u32 calib0, calib1; | 482 | u32 calib0, calib1; |
479 | struct tsl2563_chip *chip = iio_priv(indio_dev); | 483 | struct tsl2563_chip *chip = iio_priv(indio_dev); |
480 | 484 | ||
481 | mutex_lock(&chip->lock); | 485 | mutex_lock(&chip->lock); |
482 | switch (m) { | 486 | switch (mask) { |
483 | case IIO_CHAN_INFO_RAW: | 487 | case IIO_CHAN_INFO_RAW: |
484 | case IIO_CHAN_INFO_PROCESSED: | 488 | case IIO_CHAN_INFO_PROCESSED: |
485 | switch (chan->type) { | 489 | switch (chan->type) { |
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
498 | ret = tsl2563_get_adc(chip); | 502 | ret = tsl2563_get_adc(chip); |
499 | if (ret) | 503 | if (ret) |
500 | goto error_ret; | 504 | goto error_ret; |
501 | if (chan->channel == 0) | 505 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) |
502 | *val = chip->data0; | 506 | *val = chip->data0; |
503 | else | 507 | else |
504 | *val = chip->data1; | 508 | *val = chip->data1; |
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev, | |||
510 | break; | 514 | break; |
511 | 515 | ||
512 | case IIO_CHAN_INFO_CALIBSCALE: | 516 | case IIO_CHAN_INFO_CALIBSCALE: |
513 | if (chan->channel == 0) | 517 | if (chan->channel2 == IIO_MOD_LIGHT_BOTH) |
514 | *val = calib_to_sysfs(chip->calib0); | 518 | *val = calib_to_sysfs(chip->calib0); |
515 | else | 519 | else |
516 | *val = calib_to_sysfs(chip->calib1); | 520 | *val = calib_to_sysfs(chip->calib1); |
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index ff284e5afd95..05423543f89d 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c | |||
@@ -85,6 +85,7 @@ | |||
85 | #define AK8975_MAX_CONVERSION_TIMEOUT 500 | 85 | #define AK8975_MAX_CONVERSION_TIMEOUT 500 |
86 | #define AK8975_CONVERSION_DONE_POLL_TIME 10 | 86 | #define AK8975_CONVERSION_DONE_POLL_TIME 10 |
87 | #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) | 87 | #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) |
88 | #define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256) | ||
88 | 89 | ||
89 | /* | 90 | /* |
90 | * Per-instance context data for the device. | 91 | * Per-instance context data for the device. |
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client) | |||
265 | * | 266 | * |
266 | * Since 1uT = 0.01 gauss, our final scale factor becomes: | 267 | * Since 1uT = 0.01 gauss, our final scale factor becomes: |
267 | * | 268 | * |
268 | * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 | 269 | * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100 |
269 | * Hadj = H * ((ASA + 128) * 30 / 256 | 270 | * Hadj = H * ((ASA + 128) * 0.003) / 256 |
270 | * | 271 | * |
271 | * Since ASA doesn't change, we cache the resultant scale factor into the | 272 | * Since ASA doesn't change, we cache the resultant scale factor into the |
272 | * device context in ak8975_setup(). | 273 | * device context in ak8975_setup(). |
273 | */ | 274 | */ |
274 | data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; | 275 | data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]); |
275 | data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; | 276 | data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]); |
276 | data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; | 277 | data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]); |
277 | 278 | ||
278 | return 0; | 279 | return 0; |
279 | } | 280 | } |
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev, | |||
428 | case IIO_CHAN_INFO_RAW: | 429 | case IIO_CHAN_INFO_RAW: |
429 | return ak8975_read_axis(indio_dev, chan->address, val); | 430 | return ak8975_read_axis(indio_dev, chan->address, val); |
430 | case IIO_CHAN_INFO_SCALE: | 431 | case IIO_CHAN_INFO_SCALE: |
431 | *val = data->raw_to_gauss[chan->address]; | 432 | *val = 0; |
432 | return IIO_VAL_INT; | 433 | *val2 = data->raw_to_gauss[chan->address]; |
434 | return IIO_VAL_INT_PLUS_MICRO; | ||
433 | } | 435 | } |
434 | return -EINVAL; | 436 | return -EINVAL; |
435 | } | 437 | } |
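The ak8975 hunk changes the reported scale from a bare integer to IIO_VAL_INT_PLUS_MICRO, with RAW_TO_GAUSS() producing the fractional part in micro-Gauss per LSB. A small worked example of the arithmetic (standalone, illustrative only):

	#include <stdio.h>

	#define RAW_TO_GAUSS(asa)	((((asa) + 128) * 3000) / 256)

	int main(void)
	{
		int asa = 128;	/* a typical factory sensitivity adjustment */

		/* New report: val = 0, val2 = RAW_TO_GAUSS(asa), IIO_VAL_INT_PLUS_MICRO */
		printf("scale = 0.%06d Gauss/LSB\n", RAW_TO_GAUSS(asa));	/* 0.003000 */

		/* The old code returned ((asa + 128) * 30) >> 8 as a bare integer,
		 * i.e. 30 here: a scale 10000 times too large. */
		printf("old scale = %d\n", ((asa + 128) * 30) >> 8);
		return 0;
	}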
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index 4b65b6d3bdb1..f66955fb3509 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c | |||
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf, | |||
106 | 106 | ||
107 | while (n-- > 0) | 107 | while (n-- > 0) |
108 | len += scnprintf(buf + len, PAGE_SIZE - len, | 108 | len += scnprintf(buf + len, PAGE_SIZE - len, |
109 | "%d.%d ", vals[n][0], vals[n][1]); | 109 | "%d.%06d ", vals[n][0], vals[n][1]); |
110 | 110 | ||
111 | /* replace trailing space by newline */ | 111 | /* replace trailing space by newline */ |
112 | buf[len - 1] = '\n'; | 112 | buf[len - 1] = '\n'; |
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev, | |||
154 | 154 | ||
155 | switch (mask) { | 155 | switch (mask) { |
156 | case IIO_CHAN_INFO_RAW: | 156 | case IIO_CHAN_INFO_RAW: |
157 | if (iio_buffer_enabled(indio_dev)) | ||
158 | return -EBUSY; | ||
159 | |||
157 | switch (chan->type) { | 160 | switch (chan->type) { |
158 | case IIO_MAGN: /* in 0.1 uT / LSB */ | 161 | case IIO_MAGN: /* in 0.1 uT / LSB */ |
159 | ret = mag3110_read(data, buffer); | 162 | ret = mag3110_read(data, buffer); |
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev, | |||
199 | struct mag3110_data *data = iio_priv(indio_dev); | 202 | struct mag3110_data *data = iio_priv(indio_dev); |
200 | int rate; | 203 | int rate; |
201 | 204 | ||
205 | if (iio_buffer_enabled(indio_dev)) | ||
206 | return -EBUSY; | ||
207 | |||
202 | switch (mask) { | 208 | switch (mask) { |
203 | case IIO_CHAN_INFO_SAMP_FREQ: | 209 | case IIO_CHAN_INFO_SAMP_FREQ: |
204 | rate = mag3110_get_samp_freq_index(data, val, val2); | 210 | rate = mag3110_get_samp_freq_index(data, val, val2); |
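The one-character format change in mag3110_show_int_plus_micros() matters because the second element of each pair is a count of microunits: without zero padding the leading zeros of the fractional part are lost and the printed number is wrong. A tiny standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		int val = 1, val2 = 5000;	/* 1 + 5000 microunits = 1.005 */

		printf("%d.%d\n",   val, val2);	/* "1.5000"   -- reads as 1.5   */
		printf("%d.%06d\n", val, val2);	/* "1.005000" -- the real value */
		return 0;
	}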
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index d53cf519f42a..00400c352c1a 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c | |||
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
1082 | 1082 | ||
1083 | /* Initialize network device */ | 1083 | /* Initialize network device */ |
1084 | if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { | 1084 | if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { |
1085 | ret = -ENOMEM; | ||
1085 | iounmap(mmio_regs); | 1086 | iounmap(mmio_regs); |
1086 | goto bail4; | 1087 | goto bail4; |
1087 | } | 1088 | } |
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
1151 | goto bail10; | 1152 | goto bail10; |
1152 | } | 1153 | } |
1153 | 1154 | ||
1154 | if (c2_register_device(c2dev)) | 1155 | ret = c2_register_device(c2dev); |
1156 | if (ret) | ||
1155 | goto bail10; | 1157 | goto bail10; |
1156 | 1158 | ||
1157 | return 0; | 1159 | return 0; |
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index b7c986990053..d2a6d961344b 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c | |||
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev) | |||
576 | goto bail4; | 576 | goto bail4; |
577 | 577 | ||
578 | /* Initialize the cached adapter limits */ | 578 | /* Initialize the cached adapter limits */ |
579 | if (c2_rnic_query(c2dev, &c2dev->props)) | 579 | err = c2_rnic_query(c2dev, &c2dev->props); |
580 | if (err) | ||
580 | goto bail5; | 581 | goto bail5; |
581 | 582 | ||
582 | /* Initialize the PD pool */ | 583 | /* Initialize the PD pool */ |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 45126879ad28..d286bdebe2ab 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3352 | goto free_dst; | 3352 | goto free_dst; |
3353 | } | 3353 | } |
3354 | 3354 | ||
3355 | neigh_release(neigh); | ||
3355 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | 3356 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; |
3356 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; | 3357 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; |
3357 | window = (__force u16) htons((__force u16)tcph->window); | 3358 | window = (__force u16) htons((__force u16)tcph->window); |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index c2702f549f10..e81c5547e647 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, | |||
347 | props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? | 347 | props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? |
348 | IB_WIDTH_4X : IB_WIDTH_1X; | 348 | IB_WIDTH_4X : IB_WIDTH_1X; |
349 | props->active_speed = IB_SPEED_QDR; | 349 | props->active_speed = IB_SPEED_QDR; |
350 | props->port_cap_flags = IB_PORT_CM_SUP; | 350 | props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; |
351 | props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; | 351 | props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; |
352 | props->max_msg_sz = mdev->dev->caps.max_msg_sz; | 352 | props->max_msg_sz = mdev->dev->caps.max_msg_sz; |
353 | props->pkey_tbl_len = 1; | 353 | props->pkey_tbl_len = 1; |
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = { | |||
1357 | &dev_attr_board_id | 1357 | &dev_attr_board_id |
1358 | }; | 1358 | }; |
1359 | 1359 | ||
1360 | static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, | ||
1361 | struct net_device *dev) | ||
1362 | { | ||
1363 | memcpy(eui, dev->dev_addr, 3); | ||
1364 | memcpy(eui + 5, dev->dev_addr + 3, 3); | ||
1365 | if (vlan_id < 0x1000) { | ||
1366 | eui[3] = vlan_id >> 8; | ||
1367 | eui[4] = vlan_id & 0xff; | ||
1368 | } else { | ||
1369 | eui[3] = 0xff; | ||
1370 | eui[4] = 0xfe; | ||
1371 | } | ||
1372 | eui[0] ^= 2; | ||
1373 | } | ||
1374 | |||
1360 | static void update_gids_task(struct work_struct *work) | 1375 | static void update_gids_task(struct work_struct *work) |
1361 | { | 1376 | { |
1362 | struct update_gid_work *gw = container_of(work, struct update_gid_work, work); | 1377 | struct update_gid_work *gw = container_of(work, struct update_gid_work, work); |
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work) | |||
1393 | struct mlx4_cmd_mailbox *mailbox; | 1408 | struct mlx4_cmd_mailbox *mailbox; |
1394 | union ib_gid *gids; | 1409 | union ib_gid *gids; |
1395 | int err; | 1410 | int err; |
1396 | int i; | ||
1397 | struct mlx4_dev *dev = gw->dev->dev; | 1411 | struct mlx4_dev *dev = gw->dev->dev; |
1398 | 1412 | ||
1399 | mailbox = mlx4_alloc_cmd_mailbox(dev); | 1413 | mailbox = mlx4_alloc_cmd_mailbox(dev); |
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work) | |||
1405 | gids = mailbox->buf; | 1419 | gids = mailbox->buf; |
1406 | memcpy(gids, gw->gids, sizeof(gw->gids)); | 1420 | memcpy(gids, gw->gids, sizeof(gw->gids)); |
1407 | 1421 | ||
1408 | for (i = 1; i < gw->dev->num_ports + 1; i++) { | 1422 | if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) == |
1409 | if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) == | 1423 | IB_LINK_LAYER_ETHERNET) { |
1410 | IB_LINK_LAYER_ETHERNET) { | 1424 | err = mlx4_cmd(dev, mailbox->dma, |
1411 | err = mlx4_cmd(dev, mailbox->dma, | 1425 | MLX4_SET_PORT_GID_TABLE << 8 | gw->port, |
1412 | MLX4_SET_PORT_GID_TABLE << 8 | i, | 1426 | 1, MLX4_CMD_SET_PORT, |
1413 | 1, MLX4_CMD_SET_PORT, | 1427 | MLX4_CMD_TIME_CLASS_B, |
1414 | MLX4_CMD_TIME_CLASS_B, | 1428 | MLX4_CMD_WRAPPED); |
1415 | MLX4_CMD_WRAPPED); | 1429 | if (err) |
1416 | if (err) | 1430 | pr_warn(KERN_WARNING |
1417 | pr_warn(KERN_WARNING | 1431 | "set port %d command failed\n", gw->port); |
1418 | "set port %d command failed\n", i); | ||
1419 | } | ||
1420 | } | 1432 | } |
1421 | 1433 | ||
1422 | mlx4_free_cmd_mailbox(dev, mailbox); | 1434 | mlx4_free_cmd_mailbox(dev, mailbox); |
@@ -1425,7 +1437,8 @@ free: | |||
1425 | } | 1437 | } |
1426 | 1438 | ||
1427 | static int update_gid_table(struct mlx4_ib_dev *dev, int port, | 1439 | static int update_gid_table(struct mlx4_ib_dev *dev, int port, |
1428 | union ib_gid *gid, int clear) | 1440 | union ib_gid *gid, int clear, |
1441 | int default_gid) | ||
1429 | { | 1442 | { |
1430 | struct update_gid_work *work; | 1443 | struct update_gid_work *work; |
1431 | int i; | 1444 | int i; |
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, | |||
1434 | int found = -1; | 1447 | int found = -1; |
1435 | int max_gids; | 1448 | int max_gids; |
1436 | 1449 | ||
1437 | max_gids = dev->dev->caps.gid_table_len[port]; | 1450 | if (default_gid) { |
1438 | for (i = 0; i < max_gids; ++i) { | 1451 | free = 0; |
1439 | if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, | 1452 | } else { |
1440 | sizeof(*gid))) | 1453 | max_gids = dev->dev->caps.gid_table_len[port]; |
1441 | found = i; | 1454 | for (i = 1; i < max_gids; ++i) { |
1442 | 1455 | if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, | |
1443 | if (clear) { | ||
1444 | if (found >= 0) { | ||
1445 | need_update = 1; | ||
1446 | dev->iboe.gid_table[port - 1][found] = zgid; | ||
1447 | break; | ||
1448 | } | ||
1449 | } else { | ||
1450 | if (found >= 0) | ||
1451 | break; | ||
1452 | |||
1453 | if (free < 0 && | ||
1454 | !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, | ||
1455 | sizeof(*gid))) | 1456 | sizeof(*gid))) |
1456 | free = i; | 1457 | found = i; |
1458 | |||
1459 | if (clear) { | ||
1460 | if (found >= 0) { | ||
1461 | need_update = 1; | ||
1462 | dev->iboe.gid_table[port - 1][found] = | ||
1463 | zgid; | ||
1464 | break; | ||
1465 | } | ||
1466 | } else { | ||
1467 | if (found >= 0) | ||
1468 | break; | ||
1469 | |||
1470 | if (free < 0 && | ||
1471 | !memcmp(&dev->iboe.gid_table[port - 1][i], | ||
1472 | &zgid, sizeof(*gid))) | ||
1473 | free = i; | ||
1474 | } | ||
1457 | } | 1475 | } |
1458 | } | 1476 | } |
1459 | 1477 | ||
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port, | |||
1478 | return 0; | 1496 | return 0; |
1479 | } | 1497 | } |
1480 | 1498 | ||
1481 | static int reset_gid_table(struct mlx4_ib_dev *dev) | 1499 | static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid) |
1482 | { | 1500 | { |
1483 | struct update_gid_work *work; | 1501 | gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); |
1502 | mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev); | ||
1503 | } | ||
1504 | |||
1484 | 1505 | ||
1506 | static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port) | ||
1507 | { | ||
1508 | struct update_gid_work *work; | ||
1485 | 1509 | ||
1486 | work = kzalloc(sizeof(*work), GFP_ATOMIC); | 1510 | work = kzalloc(sizeof(*work), GFP_ATOMIC); |
1487 | if (!work) | 1511 | if (!work) |
1488 | return -ENOMEM; | 1512 | return -ENOMEM; |
1489 | memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table)); | 1513 | |
1514 | memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids)); | ||
1490 | memset(work->gids, 0, sizeof(work->gids)); | 1515 | memset(work->gids, 0, sizeof(work->gids)); |
1491 | INIT_WORK(&work->work, reset_gids_task); | 1516 | INIT_WORK(&work->work, reset_gids_task); |
1492 | work->dev = dev; | 1517 | work->dev = dev; |
1518 | work->port = port; | ||
1493 | queue_work(wq, &work->work); | 1519 | queue_work(wq, &work->work); |
1494 | return 0; | 1520 | return 0; |
1495 | } | 1521 | } |
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1502 | struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? | 1528 | struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? |
1503 | rdma_vlan_dev_real_dev(event_netdev) : | 1529 | rdma_vlan_dev_real_dev(event_netdev) : |
1504 | event_netdev; | 1530 | event_netdev; |
1531 | union ib_gid default_gid; | ||
1532 | |||
1533 | mlx4_make_default_gid(real_dev, &default_gid); | ||
1534 | |||
1535 | if (!memcmp(gid, &default_gid, sizeof(*gid))) | ||
1536 | return 0; | ||
1505 | 1537 | ||
1506 | if (event != NETDEV_DOWN && event != NETDEV_UP) | 1538 | if (event != NETDEV_DOWN && event != NETDEV_UP) |
1507 | return 0; | 1539 | return 0; |
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, | |||
1520 | (!netif_is_bond_master(real_dev) && | 1552 | (!netif_is_bond_master(real_dev) && |
1521 | (real_dev == iboe->netdevs[port - 1]))) | 1553 | (real_dev == iboe->netdevs[port - 1]))) |
1522 | update_gid_table(ibdev, port, gid, | 1554 | update_gid_table(ibdev, port, gid, |
1523 | event == NETDEV_DOWN); | 1555 | event == NETDEV_DOWN, 0); |
1524 | 1556 | ||
1525 | spin_unlock(&iboe->lock); | 1557 | spin_unlock(&iboe->lock); |
1526 | return 0; | 1558 | return 0; |
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, | |||
1536 | rdma_vlan_dev_real_dev(dev) : dev; | 1568 | rdma_vlan_dev_real_dev(dev) : dev; |
1537 | 1569 | ||
1538 | iboe = &ibdev->iboe; | 1570 | iboe = &ibdev->iboe; |
1539 | spin_lock(&iboe->lock); | ||
1540 | 1571 | ||
1541 | for (port = 1; port <= MLX4_MAX_PORTS; ++port) | 1572 | for (port = 1; port <= MLX4_MAX_PORTS; ++port) |
1542 | if ((netif_is_bond_master(real_dev) && | 1573 | if ((netif_is_bond_master(real_dev) && |
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev, | |||
1545 | (real_dev == iboe->netdevs[port - 1]))) | 1576 | (real_dev == iboe->netdevs[port - 1]))) |
1546 | break; | 1577 | break; |
1547 | 1578 | ||
1548 | spin_unlock(&iboe->lock); | ||
1549 | |||
1550 | if ((port == 0) || (port > MLX4_MAX_PORTS)) | 1579 | if ((port == 0) || (port > MLX4_MAX_PORTS)) |
1551 | return 0; | 1580 | return 0; |
1552 | else | 1581 | else |
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1607 | /*ifa->ifa_address;*/ | 1636 | /*ifa->ifa_address;*/ |
1608 | ipv6_addr_set_v4mapped(ifa->ifa_address, | 1637 | ipv6_addr_set_v4mapped(ifa->ifa_address, |
1609 | (struct in6_addr *)&gid); | 1638 | (struct in6_addr *)&gid); |
1610 | update_gid_table(ibdev, port, &gid, 0); | 1639 | update_gid_table(ibdev, port, &gid, 0, 0); |
1611 | } | 1640 | } |
1612 | endfor_ifa(in_dev); | 1641 | endfor_ifa(in_dev); |
1613 | in_dev_put(in_dev); | 1642 | in_dev_put(in_dev); |
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1619 | read_lock_bh(&in6_dev->lock); | 1648 | read_lock_bh(&in6_dev->lock); |
1620 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { | 1649 | list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { |
1621 | pgid = (union ib_gid *)&ifp->addr; | 1650 | pgid = (union ib_gid *)&ifp->addr; |
1622 | update_gid_table(ibdev, port, pgid, 0); | 1651 | update_gid_table(ibdev, port, pgid, 0, 0); |
1623 | } | 1652 | } |
1624 | read_unlock_bh(&in6_dev->lock); | 1653 | read_unlock_bh(&in6_dev->lock); |
1625 | in6_dev_put(in6_dev); | 1654 | in6_dev_put(in6_dev); |
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, | |||
1627 | #endif | 1656 | #endif |
1628 | } | 1657 | } |
1629 | 1658 | ||
1659 | static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev, | ||
1660 | struct net_device *dev, u8 port) | ||
1661 | { | ||
1662 | union ib_gid gid; | ||
1663 | mlx4_make_default_gid(dev, &gid); | ||
1664 | update_gid_table(ibdev, port, &gid, 0, 1); | ||
1665 | } | ||
1666 | |||
1630 | static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | 1667 | static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) |
1631 | { | 1668 | { |
1632 | struct net_device *dev; | 1669 | struct net_device *dev; |
1670 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; | ||
1671 | int i; | ||
1633 | 1672 | ||
1634 | if (reset_gid_table(ibdev)) | 1673 | for (i = 1; i <= ibdev->num_ports; ++i) |
1635 | return -1; | 1674 | if (reset_gid_table(ibdev, i)) |
1675 | return -1; | ||
1636 | 1676 | ||
1637 | read_lock(&dev_base_lock); | 1677 | read_lock(&dev_base_lock); |
1678 | spin_lock(&iboe->lock); | ||
1638 | 1679 | ||
1639 | for_each_netdev(&init_net, dev) { | 1680 | for_each_netdev(&init_net, dev) { |
1640 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); | 1681 | u8 port = mlx4_ib_get_dev_port(dev, ibdev); |
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) | |||
1642 | mlx4_ib_get_dev_addr(dev, ibdev, port); | 1683 | mlx4_ib_get_dev_addr(dev, ibdev, port); |
1643 | } | 1684 | } |
1644 | 1685 | ||
1686 | spin_unlock(&iboe->lock); | ||
1645 | read_unlock(&dev_base_lock); | 1687 | read_unlock(&dev_base_lock); |
1646 | 1688 | ||
1647 | return 0; | 1689 | return 0; |
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) | |||
1656 | 1698 | ||
1657 | spin_lock(&iboe->lock); | 1699 | spin_lock(&iboe->lock); |
1658 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | 1700 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { |
1701 | enum ib_port_state port_state = IB_PORT_NOP; | ||
1659 | struct net_device *old_master = iboe->masters[port - 1]; | 1702 | struct net_device *old_master = iboe->masters[port - 1]; |
1703 | struct net_device *curr_netdev; | ||
1660 | struct net_device *curr_master; | 1704 | struct net_device *curr_master; |
1705 | |||
1661 | iboe->netdevs[port - 1] = | 1706 | iboe->netdevs[port - 1] = |
1662 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); | 1707 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); |
1708 | if (iboe->netdevs[port - 1]) | ||
1709 | mlx4_ib_set_default_gid(ibdev, | ||
1710 | iboe->netdevs[port - 1], port); | ||
1711 | curr_netdev = iboe->netdevs[port - 1]; | ||
1663 | 1712 | ||
1664 | if (iboe->netdevs[port - 1] && | 1713 | if (iboe->netdevs[port - 1] && |
1665 | netif_is_bond_slave(iboe->netdevs[port - 1])) { | 1714 | netif_is_bond_slave(iboe->netdevs[port - 1])) { |
1666 | rtnl_lock(); | ||
1667 | iboe->masters[port - 1] = netdev_master_upper_dev_get( | 1715 | iboe->masters[port - 1] = netdev_master_upper_dev_get( |
1668 | iboe->netdevs[port - 1]); | 1716 | iboe->netdevs[port - 1]); |
1669 | rtnl_unlock(); | 1717 | } else { |
1718 | iboe->masters[port - 1] = NULL; | ||
1670 | } | 1719 | } |
1671 | curr_master = iboe->masters[port - 1]; | 1720 | curr_master = iboe->masters[port - 1]; |
1672 | 1721 | ||
1722 | if (curr_netdev) { | ||
1723 | port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? | ||
1724 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
1725 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1726 | } else { | ||
1727 | reset_gid_table(ibdev, port); | ||
1728 | } | ||
1729 | /* if using bonding/team and a slave port is down, we don't want the bond IP | ||
1730 | * based gids in the table since flows that select port by gid may get | ||
1731 | * the down port. | ||
1732 | */ | ||
1733 | if (curr_master && (port_state == IB_PORT_DOWN)) { | ||
1734 | reset_gid_table(ibdev, port); | ||
1735 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1736 | } | ||
1673 | /* if bonding is used it is possible that we add it to masters | 1737 | /* if bonding is used it is possible that we add it to masters |
1674 | only after IP address is assigned to the net bonding | 1738 | * only after IP address is assigned to the net bonding |
1675 | interface */ | 1739 | * interface. |
1676 | if (curr_master && (old_master != curr_master)) | 1740 | */ |
1741 | if (curr_master && (old_master != curr_master)) { | ||
1742 | reset_gid_table(ibdev, port); | ||
1743 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1677 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); | 1744 | mlx4_ib_get_dev_addr(curr_master, ibdev, port); |
1745 | } | ||
1746 | |||
1747 | if (!curr_master && (old_master != curr_master)) { | ||
1748 | reset_gid_table(ibdev, port); | ||
1749 | mlx4_ib_set_default_gid(ibdev, curr_netdev, port); | ||
1750 | mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); | ||
1751 | } | ||
1678 | } | 1752 | } |
1679 | 1753 | ||
1680 | spin_unlock(&iboe->lock); | 1754 | spin_unlock(&iboe->lock); |
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1810 | int i, j; | 1884 | int i, j; |
1811 | int err; | 1885 | int err; |
1812 | struct mlx4_ib_iboe *iboe; | 1886 | struct mlx4_ib_iboe *iboe; |
1887 | int ib_num_ports = 0; | ||
1813 | 1888 | ||
1814 | pr_info_once("%s", mlx4_ib_version); | 1889 | pr_info_once("%s", mlx4_ib_version); |
1815 | 1890 | ||
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
1985 | ibdev->counters[i] = -1; | 2060 | ibdev->counters[i] = -1; |
1986 | } | 2061 | } |
1987 | 2062 | ||
2063 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | ||
2064 | ib_num_ports++; | ||
2065 | |||
1988 | spin_lock_init(&ibdev->sm_lock); | 2066 | spin_lock_init(&ibdev->sm_lock); |
1989 | mutex_init(&ibdev->cap_mask_mutex); | 2067 | mutex_init(&ibdev->cap_mask_mutex); |
1990 | 2068 | ||
1991 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { | 2069 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && |
2070 | ib_num_ports) { | ||
1992 | ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; | 2071 | ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; |
1993 | err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, | 2072 | err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, |
1994 | MLX4_IB_UC_STEER_QPN_ALIGN, | 2073 | MLX4_IB_UC_STEER_QPN_ALIGN, |
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2051 | } | 2130 | } |
2052 | } | 2131 | } |
2053 | #endif | 2132 | #endif |
2133 | for (i = 1 ; i <= ibdev->num_ports ; ++i) | ||
2134 | reset_gid_table(ibdev, i); | ||
2135 | rtnl_lock(); | ||
2054 | mlx4_ib_scan_netdevs(ibdev); | 2136 | mlx4_ib_scan_netdevs(ibdev); |
2137 | rtnl_unlock(); | ||
2055 | mlx4_ib_init_gid_table(ibdev); | 2138 | mlx4_ib_init_gid_table(ibdev); |
2056 | } | 2139 | } |
2057 | 2140 | ||
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index 8e6aebfaf8a4..10df386c6344 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config MLX5_INFINIBAND | 1 | config MLX5_INFINIBAND |
2 | tristate "Mellanox Connect-IB HCA support" | 2 | tristate "Mellanox Connect-IB HCA support" |
3 | depends on NETDEVICES && ETHERNET && PCI && X86 | 3 | depends on NETDEVICES && ETHERNET && PCI |
4 | select NET_VENDOR_MELLANOX | 4 | select NET_VENDOR_MELLANOX |
5 | select MLX5_CORE | 5 | select MLX5_CORE |
6 | ---help--- | 6 | ---help--- |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9660d093f8cf..aa03e732b6a8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
261 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | | 261 | props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | |
262 | IB_DEVICE_PORT_ACTIVE_EVENT | | 262 | IB_DEVICE_PORT_ACTIVE_EVENT | |
263 | IB_DEVICE_SYS_IMAGE_GUID | | 263 | IB_DEVICE_SYS_IMAGE_GUID | |
264 | IB_DEVICE_RC_RNR_NAK_GEN | | 264 | IB_DEVICE_RC_RNR_NAK_GEN; |
265 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; | ||
266 | flags = dev->mdev.caps.flags; | 265 | flags = dev->mdev.caps.flags; |
267 | if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) | 266 | if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) |
268 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; | 267 | props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; |
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
536 | struct ib_udata *udata) | 535 | struct ib_udata *udata) |
537 | { | 536 | { |
538 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 537 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
539 | struct mlx5_ib_alloc_ucontext_req req; | 538 | struct mlx5_ib_alloc_ucontext_req_v2 req; |
540 | struct mlx5_ib_alloc_ucontext_resp resp; | 539 | struct mlx5_ib_alloc_ucontext_resp resp; |
541 | struct mlx5_ib_ucontext *context; | 540 | struct mlx5_ib_ucontext *context; |
542 | struct mlx5_uuar_info *uuari; | 541 | struct mlx5_uuar_info *uuari; |
543 | struct mlx5_uar *uars; | 542 | struct mlx5_uar *uars; |
544 | int gross_uuars; | 543 | int gross_uuars; |
545 | int num_uars; | 544 | int num_uars; |
545 | int ver; | ||
546 | int uuarn; | 546 | int uuarn; |
547 | int err; | 547 | int err; |
548 | int i; | 548 | int i; |
549 | int reqlen; | ||
549 | 550 | ||
550 | if (!dev->ib_active) | 551 | if (!dev->ib_active) |
551 | return ERR_PTR(-EAGAIN); | 552 | return ERR_PTR(-EAGAIN); |
552 | 553 | ||
553 | err = ib_copy_from_udata(&req, udata, sizeof(req)); | 554 | memset(&req, 0, sizeof(req)); |
555 | reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); | ||
556 | if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) | ||
557 | ver = 0; | ||
558 | else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2)) | ||
559 | ver = 2; | ||
560 | else | ||
561 | return ERR_PTR(-EINVAL); | ||
562 | |||
563 | err = ib_copy_from_udata(&req, udata, reqlen); | ||
554 | if (err) | 564 | if (err) |
555 | return ERR_PTR(err); | 565 | return ERR_PTR(err); |
556 | 566 | ||
567 | if (req.flags || req.reserved) | ||
568 | return ERR_PTR(-EINVAL); | ||
569 | |||
557 | if (req.total_num_uuars > MLX5_MAX_UUARS) | 570 | if (req.total_num_uuars > MLX5_MAX_UUARS) |
558 | return ERR_PTR(-ENOMEM); | 571 | return ERR_PTR(-ENOMEM); |
559 | 572 | ||
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
626 | if (err) | 639 | if (err) |
627 | goto out_uars; | 640 | goto out_uars; |
628 | 641 | ||
642 | uuari->ver = ver; | ||
629 | uuari->num_low_latency_uuars = req.num_low_latency_uuars; | 643 | uuari->num_low_latency_uuars = req.num_low_latency_uuars; |
630 | uuari->uars = uars; | 644 | uuari->uars = uars; |
631 | uuari->num_uars = num_uars; | 645 | uuari->num_uars = num_uars; |
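The mlx5 alloc_ucontext change derives the user ABI version from the size of the request passed in udata instead of growing the original struct in place; unknown sizes, and non-zero flags/reserved fields, are rejected with -EINVAL. Because the request is zeroed before the copy, a v0 caller leaves flags and reserved at zero and passes the same checks. A rough sketch of the size-based dispatch (mock struct layouts mirroring user.h, not kernel code):

	#include <stddef.h>
	#include <stdint.h>

	struct req_v0 { uint32_t total_num_uuars, num_low_latency_uuars; };
	struct req_v2 { uint32_t total_num_uuars, num_low_latency_uuars,
				 flags, reserved; };

	/* Returns the ABI version implied by the payload length, or -1. */
	static int req_version(size_t reqlen)
	{
		if (reqlen == sizeof(struct req_v0))
			return 0;
		if (reqlen == sizeof(struct req_v2))
			return 2;
		return -1;	/* unknown layout: caller rejects with -EINVAL */
	}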
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index ae37fb9bf262..7dfe8a1c84cf 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
216 | 216 | ||
217 | case IB_QPT_UC: | 217 | case IB_QPT_UC: |
218 | size += sizeof(struct mlx5_wqe_ctrl_seg) + | 218 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
219 | sizeof(struct mlx5_wqe_raddr_seg); | 219 | sizeof(struct mlx5_wqe_raddr_seg) + |
220 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + | ||
221 | sizeof(struct mlx5_mkey_seg); | ||
220 | break; | 222 | break; |
221 | 223 | ||
222 | case IB_QPT_UD: | 224 | case IB_QPT_UD: |
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari, | |||
428 | break; | 430 | break; |
429 | 431 | ||
430 | case MLX5_IB_LATENCY_CLASS_MEDIUM: | 432 | case MLX5_IB_LATENCY_CLASS_MEDIUM: |
431 | uuarn = alloc_med_class_uuar(uuari); | 433 | if (uuari->ver < 2) |
434 | uuarn = -ENOMEM; | ||
435 | else | ||
436 | uuarn = alloc_med_class_uuar(uuari); | ||
432 | break; | 437 | break; |
433 | 438 | ||
434 | case MLX5_IB_LATENCY_CLASS_HIGH: | 439 | case MLX5_IB_LATENCY_CLASS_HIGH: |
435 | uuarn = alloc_high_class_uuar(uuari); | 440 | if (uuari->ver < 2) |
441 | uuarn = -ENOMEM; | ||
442 | else | ||
443 | uuarn = alloc_high_class_uuar(uuari); | ||
436 | break; | 444 | break; |
437 | 445 | ||
438 | case MLX5_IB_LATENCY_CLASS_FAST_PATH: | 446 | case MLX5_IB_LATENCY_CLASS_FAST_PATH: |
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, | |||
657 | int err; | 665 | int err; |
658 | 666 | ||
659 | uuari = &dev->mdev.priv.uuari; | 667 | uuari = &dev->mdev.priv.uuari; |
660 | if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) | 668 | if (init_attr->create_flags) |
661 | qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; | 669 | return -EINVAL; |
662 | 670 | ||
663 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) | 671 | if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) |
664 | lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; | 672 | lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; |
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h index 32a2a5dfc523..0f4f8e42a17f 100644 --- a/drivers/infiniband/hw/mlx5/user.h +++ b/drivers/infiniband/hw/mlx5/user.h | |||
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req { | |||
62 | __u32 num_low_latency_uuars; | 62 | __u32 num_low_latency_uuars; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | struct mlx5_ib_alloc_ucontext_req_v2 { | ||
66 | __u32 total_num_uuars; | ||
67 | __u32 num_low_latency_uuars; | ||
68 | __u32 flags; | ||
69 | __u32 reserved; | ||
70 | }; | ||
71 | |||
65 | struct mlx5_ib_alloc_ucontext_resp { | 72 | struct mlx5_ib_alloc_ucontext_resp { |
66 | __u32 qp_tab_size; | 73 | __u32 qp_tab_size; |
67 | __u32 bf_reg_size; | 74 | __u32 bf_reg_size; |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 429141078eec..353c7b05a90a 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) | |||
675 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); | 675 | INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); |
676 | 676 | ||
677 | /* Initialize network devices */ | 677 | /* Initialize network devices */ |
678 | if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) | 678 | netdev = nes_netdev_init(nesdev, mmio_regs); |
679 | if (netdev == NULL) { | ||
680 | ret = -ENOMEM; | ||
679 | goto bail7; | 681 | goto bail7; |
682 | } | ||
680 | 683 | ||
681 | /* Register network device */ | 684 | /* Register network device */ |
682 | ret = register_netdev(netdev); | 685 | ret = register_netdev(netdev); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 2ca86ca818bd..1a8a945efa60 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev, | |||
127 | 127 | ||
128 | is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; | 128 | is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; |
129 | if (is_vlan) | 129 | if (is_vlan) |
130 | netdev = vlan_dev_real_dev(netdev); | 130 | netdev = rdma_vlan_dev_real_dev(netdev); |
131 | 131 | ||
132 | rcu_read_lock(); | 132 | rcu_read_lock(); |
133 | list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { | 133 | list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index aa92f40c9d50..e0cc201be41a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev, | |||
176 | props->port_cap_flags = | 176 | props->port_cap_flags = |
177 | IB_PORT_CM_SUP | | 177 | IB_PORT_CM_SUP | |
178 | IB_PORT_REINIT_SUP | | 178 | IB_PORT_REINIT_SUP | |
179 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; | 179 | IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; |
180 | props->gid_tbl_len = OCRDMA_MAX_SGID; | 180 | props->gid_tbl_len = OCRDMA_MAX_SGID; |
181 | props->pkey_tbl_len = 1; | 181 | props->pkey_tbl_len = 1; |
182 | props->bad_pkey_cntr = 0; | 182 | props->bad_pkey_cntr = 0; |
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp, | |||
1416 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> | 1416 | OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> |
1417 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; | 1417 | OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; |
1418 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & | 1418 | qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & |
1419 | OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> | 1419 | OCRDMA_QP_PARAMS_TCLASS_MASK) >> |
1420 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; | 1420 | OCRDMA_QP_PARAMS_TCLASS_SHIFT; |
1421 | 1421 | ||
1422 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; | 1422 | qp_attr->ah_attr.ah_flags = IB_AH_GRH; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5bfc02f450e6..d1bd21319d7d 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) | |||
2395 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); | 2395 | qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); |
2396 | qib_write_kreg(dd, kr_scratch, 0ULL); | 2396 | qib_write_kreg(dd, kr_scratch, 0ULL); |
2397 | 2397 | ||
2398 | /* ensure previous Tx parameters are not still forced */ | ||
2399 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
2400 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
2401 | reset_tx_deemphasis_override)); | ||
2402 | |||
2398 | if (qib_compat_ddr_negotiate) { | 2403 | if (qib_compat_ddr_negotiate) { |
2399 | ppd->cpspec->ibdeltainprog = 1; | 2404 | ppd->cpspec->ibdeltainprog = 1; |
2400 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, | 2405 | ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, |
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index 7ecc6061f1f4..f8dfd76be89f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | |||
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, | |||
629 | { | 629 | { |
630 | enum usnic_transport_type trans_type = qp_flow->trans_type; | 630 | enum usnic_transport_type trans_type = qp_flow->trans_type; |
631 | int err; | 631 | int err; |
632 | uint16_t port_num = 0; | ||
632 | 633 | ||
633 | switch (trans_type) { | 634 | switch (trans_type) { |
634 | case USNIC_TRANSPORT_ROCE_CUSTOM: | 635 | case USNIC_TRANSPORT_ROCE_CUSTOM: |
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, | |||
637 | case USNIC_TRANSPORT_IPV4_UDP: | 638 | case USNIC_TRANSPORT_IPV4_UDP: |
638 | err = usnic_transport_sock_get_addr(qp_flow->udp.sock, | 639 | err = usnic_transport_sock_get_addr(qp_flow->udp.sock, |
639 | NULL, NULL, | 640 | NULL, NULL, |
640 | (uint16_t *) id); | 641 | &port_num); |
641 | if (err) | 642 | if (err) |
642 | return err; | 643 | return err; |
644 | /* | ||
645 | * Copy port_num to stack first and then to *id, | ||
646 | * so that the short to int cast works for little | ||
647 | * and big endian systems. | ||
648 | */ | ||
649 | *id = port_num; | ||
643 | break; | 650 | break; |
644 | default: | 651 | default: |
645 | usnic_err("Unsupported transport %u\n", trans_type); | 652 | usnic_err("Unsupported transport %u\n", trans_type); |
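The usnic fix exists because the old code handed a pointer to an int where a uint16_t was expected: on a little-endian machine the port number lands in the low two bytes of *id, but on big-endian it lands in the high two bytes and the id ends up shifted by 16 bits. A small standalone demonstration of the difference (not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int id = 0;
		uint16_t port = 0x1234;

		/* The old, broken pattern: write through a narrowed pointer. */
		*(uint16_t *)&id = port;
		printf("punned id   = 0x%08x\n", id);	/* 0x00001234 LE, 0x12340000 BE */

		/* The fixed pattern: fill a real uint16_t, then widen by assignment. */
		id = 0;
		id = port;				/* well-defined on any endianness */
		printf("assigned id = 0x%08x\n", id);	/* always 0x00001234 */
		return 0;
	}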
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 538822684d5b..334f34b1cd46 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc, | |||
610 | ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, | 610 | ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, |
611 | ISER_HEADERS_LEN, DMA_TO_DEVICE); | 611 | ISER_HEADERS_LEN, DMA_TO_DEVICE); |
612 | kmem_cache_free(ig.desc_cache, tx_desc); | 612 | kmem_cache_free(ig.desc_cache, tx_desc); |
613 | tx_desc = NULL; | ||
613 | } | 614 | } |
614 | 615 | ||
615 | atomic_dec(&ib_conn->post_send_buf_count); | 616 | atomic_dec(&ib_conn->post_send_buf_count); |
616 | 617 | ||
617 | if (tx_desc->type == ISCSI_TX_CONTROL) { | 618 | if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { |
618 | /* this arithmetic is legal by libiscsi dd_data allocation */ | 619 | /* this arithmetic is legal by libiscsi dd_data allocation */ |
619 | task = (void *) ((long)(void *)tx_desc - | 620 | task = (void *) ((long)(void *)tx_desc - |
620 | sizeof(struct iscsi_task)); | 621 | sizeof(struct iscsi_task)); |
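The iser hunk is a use-after-free guard: once the descriptor has been returned to the kmem cache it must not be dereferenced again, so the pointer is cleared and the later type check runs only while it is still non-NULL. The generic shape of that pattern (illustrative only; should_free(), handle_control() and CONTROL are placeholder names, not driver symbols):

	if (should_free(desc)) {
		kmem_cache_free(cache, desc);
		desc = NULL;			/* never touch it again */
	}

	if (desc && desc->type == CONTROL)	/* safe: skipped once freed */
		handle_control(desc);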
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index afe95674008b..ca37edef2791 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id) | |||
652 | /* getting here when the state is UP means that the conn is being * | 652 | /* getting here when the state is UP means that the conn is being * |
653 | * terminated asynchronously from the iSCSI layer's perspective. */ | 653 | * terminated asynchronously from the iSCSI layer's perspective. */ |
654 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, | 654 | if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, |
655 | ISER_CONN_TERMINATING)) | 655 | ISER_CONN_TERMINATING)) { |
656 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, | 656 | if (ib_conn->iser_conn) |
657 | ISCSI_ERR_CONN_FAILED); | 657 | iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, |
658 | ISCSI_ERR_CONN_FAILED); | ||
659 | else | ||
660 | iser_err("iscsi_iser connection isn't bound\n"); | ||
661 | } | ||
658 | 662 | ||
659 | /* Complete the termination process if no posts are pending */ | 663 | /* Complete the termination process if no posts are pending */ |
660 | if (ib_conn->post_recv_buf_count == 0 && | 664 | if (ib_conn->post_recv_buf_count == 0 && |
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2b161be3c1a3..d18d08a076e8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) | |||
453 | if (ret) { | 453 | if (ret) { |
454 | pr_err("Failed to create fastreg descriptor err=%d\n", | 454 | pr_err("Failed to create fastreg descriptor err=%d\n", |
455 | ret); | 455 | ret); |
456 | kfree(fr_desc); | ||
456 | goto err; | 457 | goto err; |
457 | } | 458 | } |
458 | 459 | ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 520a7e5a490b..0e537d8d0e47 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( | |||
3666 | unsigned long val; | 3666 | unsigned long val; |
3667 | int ret; | 3667 | int ret; |
3668 | 3668 | ||
3669 | ret = strict_strtoul(page, 0, &val); | 3669 | ret = kstrtoul(page, 0, &val); |
3670 | if (ret < 0) { | 3670 | if (ret < 0) { |
3671 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3671 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3672 | return -EINVAL; | 3672 | return -EINVAL; |
3673 | } | 3673 | } |
3674 | if (val > MAX_SRPT_RDMA_SIZE) { | 3674 | if (val > MAX_SRPT_RDMA_SIZE) { |
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( | |||
3706 | unsigned long val; | 3706 | unsigned long val; |
3707 | int ret; | 3707 | int ret; |
3708 | 3708 | ||
3709 | ret = strict_strtoul(page, 0, &val); | 3709 | ret = kstrtoul(page, 0, &val); |
3710 | if (ret < 0) { | 3710 | if (ret < 0) { |
3711 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3711 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3712 | return -EINVAL; | 3712 | return -EINVAL; |
3713 | } | 3713 | } |
3714 | if (val > MAX_SRPT_RSP_SIZE) { | 3714 | if (val > MAX_SRPT_RSP_SIZE) { |
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size( | |||
3746 | unsigned long val; | 3746 | unsigned long val; |
3747 | int ret; | 3747 | int ret; |
3748 | 3748 | ||
3749 | ret = strict_strtoul(page, 0, &val); | 3749 | ret = kstrtoul(page, 0, &val); |
3750 | if (ret < 0) { | 3750 | if (ret < 0) { |
3751 | pr_err("strict_strtoul() failed with ret: %d\n", ret); | 3751 | pr_err("kstrtoul() failed with ret: %d\n", ret); |
3752 | return -EINVAL; | 3752 | return -EINVAL; |
3753 | } | 3753 | } |
3754 | if (val > MAX_SRPT_SRQ_SIZE) { | 3754 | if (val > MAX_SRPT_SRQ_SIZE) { |
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable( | |||
3793 | unsigned long tmp; | 3793 | unsigned long tmp; |
3794 | int ret; | 3794 | int ret; |
3795 | 3795 | ||
3796 | ret = strict_strtoul(page, 0, &tmp); | 3796 | ret = kstrtoul(page, 0, &tmp); |
3797 | if (ret < 0) { | 3797 | if (ret < 0) { |
3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); | 3798 | printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); |
3799 | return -EINVAL; | 3799 | return -EINVAL; |
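strict_strtoul() was removed from the kernel; kstrtoul() is the drop-in replacement with the same (string, base, unsigned long *) shape but stricter parsing of trailing characters. Typical usage, matching the hunks above:

	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);	/* base 0: accepts decimal, 0x..., 0... */
	if (ret < 0)
		return ret;		/* -EINVAL or -ERANGE */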
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 0c707e4f4eaf..a4c7306ff43d 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); | |||
210 | #define GC_MARK_RECLAIMABLE 0 | 210 | #define GC_MARK_RECLAIMABLE 0 |
211 | #define GC_MARK_DIRTY 1 | 211 | #define GC_MARK_DIRTY 1 |
212 | #define GC_MARK_METADATA 2 | 212 | #define GC_MARK_METADATA 2 |
213 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); | 213 | #define GC_SECTORS_USED_SIZE 13 |
214 | #define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) | ||
215 | BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); | ||
214 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); | 216 | BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); |
215 | 217 | ||
216 | #include "journal.h" | 218 | #include "journal.h" |
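GC_SECTORS_USED is a 13-bit field, so the largest storable value is (1 << 13) - 1 = 8191; the old clamp of (1 << 14) - 1 = 16383 silently overflowed the bitfield, and defining the mask next to the field width keeps the two in sync (see the btree.c hunk below). A quick standalone check of the arithmetic:

	#include <stdio.h>

	#define GC_SECTORS_USED_SIZE	13
	#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))

	int main(void)
	{
		printf("%llu\n", MAX_GC_SECTORS_USED);	/* 8191 */
		printf("%d\n", (1 << 14) - 1);		/* 16383: too big for 13 bits */
		return 0;
	}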
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 4f6b5940e609..3f74b4b0747b 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) | |||
23 | for (k = i->start; k < bset_bkey_last(i); k = next) { | 23 | for (k = i->start; k < bset_bkey_last(i); k = next) { |
24 | next = bkey_next(k); | 24 | next = bkey_next(k); |
25 | 25 | ||
26 | printk(KERN_ERR "block %u key %zi/%u: ", set, | 26 | printk(KERN_ERR "block %u key %li/%u: ", set, |
27 | (uint64_t *) k - i->d, i->keys); | 27 | (uint64_t *) k - i->d, i->keys); |
28 | 28 | ||
29 | if (b->ops->key_dump) | 29 | if (b->ops->key_dump) |
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, | |||
1185 | struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, | 1185 | struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, |
1186 | order); | 1186 | order); |
1187 | if (!out) { | 1187 | if (!out) { |
1188 | struct page *outp; | ||
1189 | |||
1188 | BUG_ON(order > state->page_order); | 1190 | BUG_ON(order > state->page_order); |
1189 | 1191 | ||
1190 | out = page_address(mempool_alloc(state->pool, GFP_NOIO)); | 1192 | outp = mempool_alloc(state->pool, GFP_NOIO); |
1193 | out = page_address(outp); | ||
1191 | used_mempool = true; | 1194 | used_mempool = true; |
1192 | order = state->page_order; | 1195 | order = state->page_order; |
1193 | } | 1196 | } |
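A sketch of the mempool-of-pages idiom the new intermediate variable makes explicit (assumes a pool created with mempool_create_page_pool(); this is not the bcache setup itself):

#include <linux/mempool.h>
#include <linux/mm.h>

static void *grab_sort_scratch(mempool_t *page_pool)
{
	/* Pool elements are struct page *, so map one to a virtual address. */
	struct page *p = mempool_alloc(page_pool, GFP_NOIO);

	return page_address(p);
}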
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 98cc0a810a36..5f9c2a665ca5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) | |||
1167 | /* guard against overflow */ | 1167 | /* guard against overflow */ |
1168 | SET_GC_SECTORS_USED(g, min_t(unsigned, | 1168 | SET_GC_SECTORS_USED(g, min_t(unsigned, |
1169 | GC_SECTORS_USED(g) + KEY_SIZE(k), | 1169 | GC_SECTORS_USED(g) + KEY_SIZE(k), |
1170 | (1 << 14) - 1)); | 1170 | MAX_GC_SECTORS_USED)); |
1171 | 1171 | ||
1172 | BUG_ON(!GC_SECTORS_USED(g)); | 1172 | BUG_ON(!GC_SECTORS_USED(g)); |
1173 | } | 1173 | } |
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k, | |||
1805 | 1805 | ||
1806 | static size_t insert_u64s_remaining(struct btree *b) | 1806 | static size_t insert_u64s_remaining(struct btree *b) |
1807 | { | 1807 | { |
1808 | ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys); | 1808 | long ret = bch_btree_keys_u64s_remaining(&b->keys); |
1809 | 1809 | ||
1810 | /* | 1810 | /* |
1811 | * Might land in the middle of an existing extent and have to split it | 1811 | * Might land in the middle of an existing extent and have to split it |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 72cd213f213f..5d5d031cf381 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl) | |||
353 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); | 353 | struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); |
354 | struct bio *bio = op->bio, *n; | 354 | struct bio *bio = op->bio, *n; |
355 | 355 | ||
356 | if (op->bypass) | ||
357 | return bch_data_invalidate(cl); | ||
358 | |||
359 | if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { | 356 | if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { |
360 | set_gc_sectors(op->c); | 357 | set_gc_sectors(op->c); |
361 | wake_up_gc(op->c); | 358 | wake_up_gc(op->c); |
362 | } | 359 | } |
363 | 360 | ||
361 | if (op->bypass) | ||
362 | return bch_data_invalidate(cl); | ||
363 | |||
364 | /* | 364 | /* |
365 | * Journal writes are marked REQ_FLUSH; if the original write was a | 365 | * Journal writes are marked REQ_FLUSH; if the original write was a |
366 | * flush, it'll wait on the journal write. | 366 | * flush, it'll wait on the journal write. |
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index c6ab69333a6d..d8458d477a12 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b) | |||
416 | return MAP_CONTINUE; | 416 | return MAP_CONTINUE; |
417 | } | 417 | } |
418 | 418 | ||
419 | int bch_bset_print_stats(struct cache_set *c, char *buf) | 419 | static int bch_bset_print_stats(struct cache_set *c, char *buf) |
420 | { | 420 | { |
421 | struct bset_stats_op op; | 421 | struct bset_stats_op op; |
422 | int ret; | 422 | int ret; |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fd3a2a14b587..4a6ca1cb2e78 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio) | |||
1953 | for (i = 0; i < conf->raid_disks * 2; i++) { | 1953 | for (i = 0; i < conf->raid_disks * 2; i++) { |
1954 | int j; | 1954 | int j; |
1955 | int size; | 1955 | int size; |
1956 | int uptodate; | ||
1956 | struct bio *b = r1_bio->bios[i]; | 1957 | struct bio *b = r1_bio->bios[i]; |
1957 | if (b->bi_end_io != end_sync_read) | 1958 | if (b->bi_end_io != end_sync_read) |
1958 | continue; | 1959 | continue; |
1959 | /* fixup the bio for reuse */ | 1960 | /* fixup the bio for reuse, but preserve BIO_UPTODATE */ |
1961 | uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); | ||
1960 | bio_reset(b); | 1962 | bio_reset(b); |
1963 | if (!uptodate) | ||
1964 | clear_bit(BIO_UPTODATE, &b->bi_flags); | ||
1961 | b->bi_vcnt = vcnt; | 1965 | b->bi_vcnt = vcnt; |
1962 | b->bi_iter.bi_size = r1_bio->sectors << 9; | 1966 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
1963 | b->bi_iter.bi_sector = r1_bio->sector + | 1967 | b->bi_iter.bi_sector = r1_bio->sector + |
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio) | |||
1990 | int j; | 1994 | int j; |
1991 | struct bio *pbio = r1_bio->bios[primary]; | 1995 | struct bio *pbio = r1_bio->bios[primary]; |
1992 | struct bio *sbio = r1_bio->bios[i]; | 1996 | struct bio *sbio = r1_bio->bios[i]; |
1997 | int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); | ||
1993 | 1998 | ||
1994 | if (sbio->bi_end_io != end_sync_read) | 1999 | if (sbio->bi_end_io != end_sync_read) |
1995 | continue; | 2000 | continue; |
2001 | /* Now we can 'fixup' the BIO_UPTODATE flag */ | ||
2002 | set_bit(BIO_UPTODATE, &sbio->bi_flags); | ||
1996 | 2003 | ||
1997 | if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { | 2004 | if (uptodate) { |
1998 | for (j = vcnt; j-- ; ) { | 2005 | for (j = vcnt; j-- ; ) { |
1999 | struct page *p, *s; | 2006 | struct page *p, *s; |
2000 | p = pbio->bi_io_vec[j].bv_page; | 2007 | p = pbio->bi_io_vec[j].bv_page; |
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio) | |||
2009 | if (j >= 0) | 2016 | if (j >= 0) |
2010 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); | 2017 | atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); |
2011 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) | 2018 | if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) |
2012 | && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { | 2019 | && uptodate)) { |
2013 | /* No need to write to this device. */ | 2020 | /* No need to write to this device. */ |
2014 | sbio->bi_end_io = NULL; | 2021 | sbio->bi_end_io = NULL; |
2015 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); | 2022 | rdev_dec_pending(conf->mirrors[i].rdev, mddev); |
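A condensed sketch of the flag-preserving idiom the two hunks above introduce (hypothetical helper, not raid1 code): bio_reset() leaves the bio flagged up to date, like a freshly initialised bio, so only the "not up to date" state needs restoring afterwards.

#include <linux/bio.h>

static void reset_bio_keep_uptodate(struct bio *b)
{
	int uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);

	bio_reset(b);
	if (!uptodate)
		clear_bit(BIO_UPTODATE, &b->bi_flags);
}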
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f1feadeb7bb2..16f5c21963db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) | |||
5514 | return sectors * (raid_disks - conf->max_degraded); | 5514 | return sectors * (raid_disks - conf->max_degraded); |
5515 | } | 5515 | } |
5516 | 5516 | ||
5517 | static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) | ||
5518 | { | ||
5519 | safe_put_page(percpu->spare_page); | ||
5520 | kfree(percpu->scribble); | ||
5521 | percpu->spare_page = NULL; | ||
5522 | percpu->scribble = NULL; | ||
5523 | } | ||
5524 | |||
5525 | static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) | ||
5526 | { | ||
5527 | if (conf->level == 6 && !percpu->spare_page) | ||
5528 | percpu->spare_page = alloc_page(GFP_KERNEL); | ||
5529 | if (!percpu->scribble) | ||
5530 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5531 | |||
5532 | if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { | ||
5533 | free_scratch_buffer(conf, percpu); | ||
5534 | return -ENOMEM; | ||
5535 | } | ||
5536 | |||
5537 | return 0; | ||
5538 | } | ||
5539 | |||
5517 | static void raid5_free_percpu(struct r5conf *conf) | 5540 | static void raid5_free_percpu(struct r5conf *conf) |
5518 | { | 5541 | { |
5519 | struct raid5_percpu *percpu; | ||
5520 | unsigned long cpu; | 5542 | unsigned long cpu; |
5521 | 5543 | ||
5522 | if (!conf->percpu) | 5544 | if (!conf->percpu) |
5523 | return; | 5545 | return; |
5524 | 5546 | ||
5525 | get_online_cpus(); | ||
5526 | for_each_possible_cpu(cpu) { | ||
5527 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
5528 | safe_put_page(percpu->spare_page); | ||
5529 | kfree(percpu->scribble); | ||
5530 | } | ||
5531 | #ifdef CONFIG_HOTPLUG_CPU | 5547 | #ifdef CONFIG_HOTPLUG_CPU |
5532 | unregister_cpu_notifier(&conf->cpu_notify); | 5548 | unregister_cpu_notifier(&conf->cpu_notify); |
5533 | #endif | 5549 | #endif |
5550 | |||
5551 | get_online_cpus(); | ||
5552 | for_each_possible_cpu(cpu) | ||
5553 | free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); | ||
5534 | put_online_cpus(); | 5554 | put_online_cpus(); |
5535 | 5555 | ||
5536 | free_percpu(conf->percpu); | 5556 | free_percpu(conf->percpu); |
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5557 | switch (action) { | 5577 | switch (action) { |
5558 | case CPU_UP_PREPARE: | 5578 | case CPU_UP_PREPARE: |
5559 | case CPU_UP_PREPARE_FROZEN: | 5579 | case CPU_UP_PREPARE_FROZEN: |
5560 | if (conf->level == 6 && !percpu->spare_page) | 5580 | if (alloc_scratch_buffer(conf, percpu)) { |
5561 | percpu->spare_page = alloc_page(GFP_KERNEL); | ||
5562 | if (!percpu->scribble) | ||
5563 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5564 | |||
5565 | if (!percpu->scribble || | ||
5566 | (conf->level == 6 && !percpu->spare_page)) { | ||
5567 | safe_put_page(percpu->spare_page); | ||
5568 | kfree(percpu->scribble); | ||
5569 | pr_err("%s: failed memory allocation for cpu%ld\n", | 5581 | pr_err("%s: failed memory allocation for cpu%ld\n", |
5570 | __func__, cpu); | 5582 | __func__, cpu); |
5571 | return notifier_from_errno(-ENOMEM); | 5583 | return notifier_from_errno(-ENOMEM); |
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5573 | break; | 5585 | break; |
5574 | case CPU_DEAD: | 5586 | case CPU_DEAD: |
5575 | case CPU_DEAD_FROZEN: | 5587 | case CPU_DEAD_FROZEN: |
5576 | safe_put_page(percpu->spare_page); | 5588 | free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); |
5577 | kfree(percpu->scribble); | ||
5578 | percpu->spare_page = NULL; | ||
5579 | percpu->scribble = NULL; | ||
5580 | break; | 5589 | break; |
5581 | default: | 5590 | default: |
5582 | break; | 5591 | break; |
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, | |||
5588 | static int raid5_alloc_percpu(struct r5conf *conf) | 5597 | static int raid5_alloc_percpu(struct r5conf *conf) |
5589 | { | 5598 | { |
5590 | unsigned long cpu; | 5599 | unsigned long cpu; |
5591 | struct page *spare_page; | 5600 | int err = 0; |
5592 | struct raid5_percpu __percpu *allcpus; | ||
5593 | void *scribble; | ||
5594 | int err; | ||
5595 | 5601 | ||
5596 | allcpus = alloc_percpu(struct raid5_percpu); | 5602 | conf->percpu = alloc_percpu(struct raid5_percpu); |
5597 | if (!allcpus) | 5603 | if (!conf->percpu) |
5598 | return -ENOMEM; | 5604 | return -ENOMEM; |
5599 | conf->percpu = allcpus; | 5605 | |
5606 | #ifdef CONFIG_HOTPLUG_CPU | ||
5607 | conf->cpu_notify.notifier_call = raid456_cpu_notify; | ||
5608 | conf->cpu_notify.priority = 0; | ||
5609 | err = register_cpu_notifier(&conf->cpu_notify); | ||
5610 | if (err) | ||
5611 | return err; | ||
5612 | #endif | ||
5600 | 5613 | ||
5601 | get_online_cpus(); | 5614 | get_online_cpus(); |
5602 | err = 0; | ||
5603 | for_each_present_cpu(cpu) { | 5615 | for_each_present_cpu(cpu) { |
5604 | if (conf->level == 6) { | 5616 | err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); |
5605 | spare_page = alloc_page(GFP_KERNEL); | 5617 | if (err) { |
5606 | if (!spare_page) { | 5618 | pr_err("%s: failed memory allocation for cpu%ld\n", |
5607 | err = -ENOMEM; | 5619 | __func__, cpu); |
5608 | break; | ||
5609 | } | ||
5610 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; | ||
5611 | } | ||
5612 | scribble = kmalloc(conf->scribble_len, GFP_KERNEL); | ||
5613 | if (!scribble) { | ||
5614 | err = -ENOMEM; | ||
5615 | break; | 5620 | break; |
5616 | } | 5621 | } |
5617 | per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; | ||
5618 | } | 5622 | } |
5619 | #ifdef CONFIG_HOTPLUG_CPU | ||
5620 | conf->cpu_notify.notifier_call = raid456_cpu_notify; | ||
5621 | conf->cpu_notify.priority = 0; | ||
5622 | if (err == 0) | ||
5623 | err = register_cpu_notifier(&conf->cpu_notify); | ||
5624 | #endif | ||
5625 | put_online_cpus(); | 5623 | put_online_cpus(); |
5626 | 5624 | ||
5627 | return err; | 5625 | return err; |
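A generic sketch of the factored-out per-CPU scratch-buffer pattern (hypothetical names; as with raid5_free_percpu() here, error unwinding of already-initialised CPUs is left to the caller):

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpu.h>

struct ex_percpu { void *scribble; };

static int ex_alloc_scratch(struct ex_percpu *p, size_t len)
{
	if (!p->scribble)
		p->scribble = kmalloc(len, GFP_KERNEL);
	return p->scribble ? 0 : -ENOMEM;
}

static int ex_init_all(struct ex_percpu __percpu *all, size_t len)
{
	unsigned long cpu;
	int err = 0;

	get_online_cpus();
	for_each_present_cpu(cpu) {
		err = ex_alloc_scratch(per_cpu_ptr(all, cpu), len);
		if (err)
			break;		/* caller frees the CPUs done so far */
	}
	put_online_cpus();

	return err;
}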
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 8f8a6b327cdb..2c2c9cc75231 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c | |||
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m) | |||
787 | if (rc != 0) { | 787 | if (rc != 0) { |
788 | dev_err(&pci_dev->dev, | 788 | dev_err(&pci_dev->dev, |
789 | "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); | 789 | "[%s] genwqe_user_vmap rc=%d\n", __func__, rc); |
790 | kfree(dma_map); | ||
790 | return rc; | 791 | return rc; |
791 | } | 792 | } |
792 | 793 | ||
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1ee2b9492a82..9b809cfc2899 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -908,7 +908,6 @@ void mei_cl_all_disconnect(struct mei_device *dev) | |||
908 | list_for_each_entry_safe(cl, next, &dev->file_list, link) { | 908 | list_for_each_entry_safe(cl, next, &dev->file_list, link) { |
909 | cl->state = MEI_FILE_DISCONNECTED; | 909 | cl->state = MEI_FILE_DISCONNECTED; |
910 | cl->mei_flow_ctrl_creds = 0; | 910 | cl->mei_flow_ctrl_creds = 0; |
911 | cl->read_cb = NULL; | ||
912 | cl->timer_count = 0; | 911 | cl->timer_count = 0; |
913 | } | 912 | } |
914 | } | 913 | } |
@@ -942,8 +941,16 @@ void mei_cl_all_wakeup(struct mei_device *dev) | |||
942 | void mei_cl_all_write_clear(struct mei_device *dev) | 941 | void mei_cl_all_write_clear(struct mei_device *dev) |
943 | { | 942 | { |
944 | struct mei_cl_cb *cb, *next; | 943 | struct mei_cl_cb *cb, *next; |
944 | struct list_head *list; | ||
945 | 945 | ||
946 | list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { | 946 | list = &dev->write_list.list; |
947 | list_for_each_entry_safe(cb, next, list, list) { | ||
948 | list_del(&cb->list); | ||
949 | mei_io_cb_free(cb); | ||
950 | } | ||
951 | |||
952 | list = &dev->write_waiting_list.list; | ||
953 | list_for_each_entry_safe(cb, next, list, list) { | ||
947 | list_del(&cb->list); | 954 | list_del(&cb->list); |
948 | mei_io_cb_free(cb); | 955 | mei_io_cb_free(cb); |
949 | } | 956 | } |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c index 752ff873f891..7e1ef0ebbb80 100644 --- a/drivers/misc/mic/host/mic_virtio.c +++ b/drivers/misc/mic/host/mic_virtio.c | |||
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, | |||
156 | static int _mic_virtio_copy(struct mic_vdev *mvdev, | 156 | static int _mic_virtio_copy(struct mic_vdev *mvdev, |
157 | struct mic_copy_desc *copy) | 157 | struct mic_copy_desc *copy) |
158 | { | 158 | { |
159 | int ret = 0, iovcnt = copy->iovcnt; | 159 | int ret = 0; |
160 | u32 iovcnt = copy->iovcnt; | ||
160 | struct iovec iov; | 161 | struct iovec iov; |
161 | struct iovec __user *u_iov = copy->iov; | 162 | struct iovec __user *u_iov = copy->iov; |
162 | void __user *ubuf = NULL; | 163 | void __user *ubuf = NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 157fe8df2c3e..8ff57e8e3e91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig | |||
@@ -4,5 +4,5 @@ | |||
4 | 4 | ||
5 | config MLX5_CORE | 5 | config MLX5_CORE |
6 | tristate | 6 | tristate |
7 | depends on PCI && X86 | 7 | depends on PCI |
8 | default n | 8 | default n |
diff --git a/drivers/of/base.c b/drivers/of/base.c index ff85450d5683..10b51106c854 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -730,46 +730,64 @@ out: | |||
730 | } | 730 | } |
731 | EXPORT_SYMBOL(of_find_node_with_property); | 731 | EXPORT_SYMBOL(of_find_node_with_property); |
732 | 732 | ||
733 | static | 733 | static const struct of_device_id * |
734 | const struct of_device_id *__of_match_node(const struct of_device_id *matches, | 734 | of_match_compatible(const struct of_device_id *matches, |
735 | const struct device_node *node) | 735 | const struct device_node *node) |
736 | { | 736 | { |
737 | const char *cp; | 737 | const char *cp; |
738 | int cplen, l; | 738 | int cplen, l; |
739 | 739 | const struct of_device_id *m; | |
740 | if (!matches) | ||
741 | return NULL; | ||
742 | 740 | ||
743 | cp = __of_get_property(node, "compatible", &cplen); | 741 | cp = __of_get_property(node, "compatible", &cplen); |
744 | do { | 742 | while (cp && (cplen > 0)) { |
745 | const struct of_device_id *m = matches; | 743 | m = matches; |
746 | |||
747 | /* Check against matches with current compatible string */ | ||
748 | while (m->name[0] || m->type[0] || m->compatible[0]) { | 744 | while (m->name[0] || m->type[0] || m->compatible[0]) { |
749 | int match = 1; | 745 | /* Only match for the entries without type and name */ |
750 | if (m->name[0]) | 746 | if (m->name[0] || m->type[0] || |
751 | match &= node->name | 747 | of_compat_cmp(m->compatible, cp, |
752 | && !strcmp(m->name, node->name); | 748 | strlen(m->compatible))) |
753 | if (m->type[0]) | 749 | m++; |
754 | match &= node->type | 750 | else |
755 | && !strcmp(m->type, node->type); | ||
756 | if (m->compatible[0]) | ||
757 | match &= cp | ||
758 | && !of_compat_cmp(m->compatible, cp, | ||
759 | strlen(m->compatible)); | ||
760 | if (match) | ||
761 | return m; | 751 | return m; |
762 | m++; | ||
763 | } | 752 | } |
764 | 753 | ||
765 | /* Get node's next compatible string */ | 754 | /* Get node's next compatible string */ |
766 | if (cp) { | 755 | l = strlen(cp) + 1; |
767 | l = strlen(cp) + 1; | 756 | cp += l; |
768 | cp += l; | 757 | cplen -= l; |
769 | cplen -= l; | 758 | } |
770 | } | 759 | |
771 | } while (cp && (cplen > 0)); | 760 | return NULL; |
761 | } | ||
762 | |||
763 | static | ||
764 | const struct of_device_id *__of_match_node(const struct of_device_id *matches, | ||
765 | const struct device_node *node) | ||
766 | { | ||
767 | const struct of_device_id *m; | ||
772 | 768 | ||
769 | if (!matches) | ||
770 | return NULL; | ||
771 | |||
772 | m = of_match_compatible(matches, node); | ||
773 | if (m) | ||
774 | return m; | ||
775 | |||
776 | while (matches->name[0] || matches->type[0] || matches->compatible[0]) { | ||
777 | int match = 1; | ||
778 | if (matches->name[0]) | ||
779 | match &= node->name | ||
780 | && !strcmp(matches->name, node->name); | ||
781 | if (matches->type[0]) | ||
782 | match &= node->type | ||
783 | && !strcmp(matches->type, node->type); | ||
784 | if (matches->compatible[0]) | ||
785 | match &= __of_device_is_compatible(node, | ||
786 | matches->compatible); | ||
787 | if (match) | ||
788 | return matches; | ||
789 | matches++; | ||
790 | } | ||
773 | return NULL; | 791 | return NULL; |
774 | } | 792 | } |
775 | 793 | ||
@@ -778,10 +796,12 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches, | |||
778 | * @matches: array of of device match structures to search in | 796 | * @matches: array of of device match structures to search in |
779 | * @node: the of device structure to match against | 797 | * @node: the of device structure to match against |
780 | * | 798 | * |
781 | * Low level utility function used by device matching. Matching order | 799 | * Low level utility function used by device matching. We have two ways |
782 | * is to compare each of the node's compatibles with all given matches | 800 | * of matching: |
783 | * first. This implies node's compatible is sorted from specific to | 801 | * - Try to find the best compatible match by comparing each compatible |
784 | * generic while matches can be in any order. | 802 | * string of device node with all the given matches respectively. |
803 | * - If the above method failed, then try to match the compatible by using | ||
804 | * __of_device_is_compatible() besides the match in type and name. | ||
785 | */ | 805 | */ |
786 | const struct of_device_id *of_match_node(const struct of_device_id *matches, | 806 | const struct of_device_id *of_match_node(const struct of_device_id *matches, |
787 | const struct device_node *node) | 807 | const struct device_node *node) |
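An illustration of the new ordering with a hypothetical match table: for a node whose compatible property is "vendor,chip-v2", "vendor,chip", the first pass walks the node's compatibles in listed (specific-to-generic) order, so the more specific entry wins even though it appears later in the table.

#include <linux/of.h>

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,chip" },	/* generic  */
	{ .compatible = "vendor,chip-v2" },	/* specific */
	{ /* sentinel */ }
};

/* of_match_node(example_ids, np) now returns &example_ids[1] for the
 * node above; entries with .name/.type set are only considered in the
 * second, fallback pass.
 */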
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index e2a783fdb98f..7c7a388c85ab 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -730,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot) | |||
730 | return (unsigned int)sta; | 730 | return (unsigned int)sta; |
731 | } | 731 | } |
732 | 732 | ||
733 | static inline bool device_status_valid(unsigned int sta) | ||
734 | { | ||
735 | /* | ||
736 | * ACPI spec says that _STA may return bit 0 clear with bit 3 set | ||
737 | * if the device is valid but does not require a device driver to be | ||
738 | * loaded (Section 6.3.7 of ACPI 5.0A). | ||
739 | */ | ||
740 | unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING; | ||
741 | return (sta & mask) == mask; | ||
742 | } | ||
743 | |||
733 | /** | 744 | /** |
734 | * trim_stale_devices - remove PCI devices that are not responding. | 745 | * trim_stale_devices - remove PCI devices that are not responding. |
735 | * @dev: PCI device to start walking the hierarchy from. | 746 | * @dev: PCI device to start walking the hierarchy from. |
@@ -745,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev) | |||
745 | unsigned long long sta; | 756 | unsigned long long sta; |
746 | 757 | ||
747 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 758 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
748 | alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) | 759 | alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) |
749 | || acpiphp_no_hotplug(handle); | 760 | || acpiphp_no_hotplug(handle); |
750 | } | 761 | } |
751 | if (!alive) { | 762 | if (!alive) { |
@@ -792,7 +803,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
792 | mutex_lock(&slot->crit_sect); | 803 | mutex_lock(&slot->crit_sect); |
793 | if (slot_no_hotplug(slot)) { | 804 | if (slot_no_hotplug(slot)) { |
794 | ; /* do nothing */ | 805 | ; /* do nothing */ |
795 | } else if (get_slot_status(slot) == ACPI_STA_ALL) { | 806 | } else if (device_status_valid(get_slot_status(slot))) { |
796 | /* remove stale devices if any */ | 807 | /* remove stale devices if any */ |
797 | list_for_each_entry_safe_reverse(dev, tmp, | 808 | list_for_each_entry_safe_reverse(dev, tmp, |
798 | &bus->devices, bus_list) | 809 | &bus->devices, bus_list) |
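A worked example of the relaxed status check (bit layout per the _STA definition in ACPI 5.0A, Section 6.3.7: bit 0 present, bit 1 enabled, bit 2 shown in UI, bit 3 functioning):

/* A device reporting sta == 0x0a (enabled + functioning, bit 0 clear):
 *
 *	mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
 *	(0x0a & mask) == mask	-> true:  device_status_valid() accepts it
 *	0x0a == ACPI_STA_ALL	-> false: the old comparison rejected it
 */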
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 645c867c1257..5f5b0f4be5be 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy) | |||
162 | { | 162 | { |
163 | int ret; | 163 | int ret; |
164 | 164 | ||
165 | if (!phy) | ||
166 | return 0; | ||
167 | |||
165 | ret = phy_pm_runtime_get_sync(phy); | 168 | ret = phy_pm_runtime_get_sync(phy); |
166 | if (ret < 0 && ret != -ENOTSUPP) | 169 | if (ret < 0 && ret != -ENOTSUPP) |
167 | return ret; | 170 | return ret; |
@@ -187,6 +190,9 @@ int phy_exit(struct phy *phy) | |||
187 | { | 190 | { |
188 | int ret; | 191 | int ret; |
189 | 192 | ||
193 | if (!phy) | ||
194 | return 0; | ||
195 | |||
190 | ret = phy_pm_runtime_get_sync(phy); | 196 | ret = phy_pm_runtime_get_sync(phy); |
191 | if (ret < 0 && ret != -ENOTSUPP) | 197 | if (ret < 0 && ret != -ENOTSUPP) |
192 | return ret; | 198 | return ret; |
@@ -212,6 +218,9 @@ int phy_power_on(struct phy *phy) | |||
212 | { | 218 | { |
213 | int ret; | 219 | int ret; |
214 | 220 | ||
221 | if (!phy) | ||
222 | return 0; | ||
223 | |||
215 | ret = phy_pm_runtime_get_sync(phy); | 224 | ret = phy_pm_runtime_get_sync(phy); |
216 | if (ret < 0 && ret != -ENOTSUPP) | 225 | if (ret < 0 && ret != -ENOTSUPP) |
217 | return ret; | 226 | return ret; |
@@ -240,6 +249,9 @@ int phy_power_off(struct phy *phy) | |||
240 | { | 249 | { |
241 | int ret; | 250 | int ret; |
242 | 251 | ||
252 | if (!phy) | ||
253 | return 0; | ||
254 | |||
243 | mutex_lock(&phy->mutex); | 255 | mutex_lock(&phy->mutex); |
244 | if (phy->power_count == 1 && phy->ops->power_off) { | 256 | if (phy->power_count == 1 && phy->ops->power_off) { |
245 | ret = phy->ops->power_off(phy); | 257 | ret = phy->ops->power_off(phy); |
@@ -308,7 +320,7 @@ err0: | |||
308 | */ | 320 | */ |
309 | void phy_put(struct phy *phy) | 321 | void phy_put(struct phy *phy) |
310 | { | 322 | { |
311 | if (IS_ERR(phy)) | 323 | if (!phy || IS_ERR(phy)) |
312 | return; | 324 | return; |
313 | 325 | ||
314 | module_put(phy->ops->owner); | 326 | module_put(phy->ops->owner); |
@@ -328,6 +340,9 @@ void devm_phy_put(struct device *dev, struct phy *phy) | |||
328 | { | 340 | { |
329 | int r; | 341 | int r; |
330 | 342 | ||
343 | if (!phy) | ||
344 | return; | ||
345 | |||
331 | r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); | 346 | r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); |
332 | dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); | 347 | dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); |
333 | } | 348 | } |
@@ -411,6 +426,27 @@ struct phy *phy_get(struct device *dev, const char *string) | |||
411 | EXPORT_SYMBOL_GPL(phy_get); | 426 | EXPORT_SYMBOL_GPL(phy_get); |
412 | 427 | ||
413 | /** | 428 | /** |
429 | * phy_optional_get() - lookup and obtain a reference to an optional phy. | ||
430 | * @dev: device that requests this phy | ||
431 | * @string: the phy name as given in the dt data or the name of the controller | ||
432 | * port for non-dt case | ||
433 | * | ||
434 | * Returns the phy driver, after getting a refcount to it; or | ||
435 | * NULL if there is no such phy. The caller is responsible for | ||
436 | * calling phy_put() to release that count. | ||
437 | */ | ||
438 | struct phy *phy_optional_get(struct device *dev, const char *string) | ||
439 | { | ||
440 | struct phy *phy = phy_get(dev, string); | ||
441 | |||
442 | if (PTR_ERR(phy) == -ENODEV) | ||
443 | phy = NULL; | ||
444 | |||
445 | return phy; | ||
446 | } | ||
447 | EXPORT_SYMBOL_GPL(phy_optional_get); | ||
448 | |||
449 | /** | ||
414 | * devm_phy_get() - lookup and obtain a reference to a phy. | 450 | * devm_phy_get() - lookup and obtain a reference to a phy. |
415 | * @dev: device that requests this phy | 451 | * @dev: device that requests this phy |
416 | * @string: the phy name as given in the dt data or phy device name | 452 | * @string: the phy name as given in the dt data or phy device name |
@@ -441,6 +477,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string) | |||
441 | EXPORT_SYMBOL_GPL(devm_phy_get); | 477 | EXPORT_SYMBOL_GPL(devm_phy_get); |
442 | 478 | ||
443 | /** | 479 | /** |
480 | * devm_phy_optional_get() - lookup and obtain a reference to an optional phy. | ||
481 | * @dev: device that requests this phy | ||
482 | * @string: the phy name as given in the dt data or phy device name | ||
483 | * for non-dt case | ||
484 | * | ||
485 | * Gets the phy using phy_get(), and associates a device with it using | ||
486 | * devres. On driver detach, release function is invoked on the devres | ||
487 | * data, then, devres data is freed. This differs to devm_phy_get() in | ||
488 | * that if the phy does not exist, it is not considered an error and | ||
489 | * -ENODEV will not be returned. Instead the NULL phy is returned, | ||
490 | * which can be passed to all other phy consumer calls. | ||
491 | */ | ||
492 | struct phy *devm_phy_optional_get(struct device *dev, const char *string) | ||
493 | { | ||
494 | struct phy *phy = devm_phy_get(dev, string); | ||
495 | |||
496 | if (PTR_ERR(phy) == -ENODEV) | ||
497 | phy = NULL; | ||
498 | |||
499 | return phy; | ||
500 | } | ||
501 | EXPORT_SYMBOL_GPL(devm_phy_optional_get); | ||
502 | |||
503 | /** | ||
444 | * phy_create() - create a new phy | 504 | * phy_create() - create a new phy |
445 | * @dev: device that is creating the new phy | 505 | * @dev: device that is creating the new phy |
446 | * @ops: function pointers for performing phy operations | 506 | * @ops: function pointers for performing phy operations |
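A consumer-side sketch (hypothetical driver, not part of this series): with the optional getters a missing PHY comes back as NULL instead of ERR_PTR(-ENODEV), and the NULL is accepted by phy_init(), phy_exit(), phy_power_on(), phy_power_off(), phy_put() and devm_phy_put() thanks to the early-return checks added above.

#include <linux/phy/phy.h>

static int example_probe(struct device *dev)
{
	struct phy *phy = devm_phy_optional_get(dev, "usb");
	int ret;

	if (IS_ERR(phy))		/* real errors, e.g. -EPROBE_DEFER */
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* no-op returning 0 when phy == NULL */
	if (ret)
		return ret;

	return phy_power_on(phy);	/* likewise tolerant of NULL */
}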
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c index 563174891c90..041f9b638d28 100644 --- a/drivers/power/ds2782_battery.c +++ b/drivers/power/ds2782_battery.c | |||
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV) | |||
192 | 192 | ||
193 | /* | 193 | /* |
194 | * Voltage is measured in units of 1.22mV. The voltage is stored as | 194 | * Voltage is measured in units of 1.22mV. The voltage is stored as |
195 | * a 10-bit number plus sign, in the upper bits of a 16-bit register | 195 | * a 12-bit number plus sign, in the upper bits of a 16-bit register |
196 | */ | 196 | */ |
197 | err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); | 197 | err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw); |
198 | if (err) | 198 | if (err) |
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index 80edb7d8cb54..0b4cf9d63291 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c | |||
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev) | |||
444 | ret = PTR_ERR(isp->phy); | 444 | ret = PTR_ERR(isp->phy); |
445 | goto fail0; | 445 | goto fail0; |
446 | } | 446 | } |
447 | if (!isp->phy) | ||
448 | goto fail0; | ||
449 | 447 | ||
450 | isp->dev = &pdev->dev; | 448 | isp->dev = &pdev->dev; |
451 | platform_set_drvdata(pdev, isp); | 449 | platform_set_drvdata(pdev, isp); |
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c index c7ff6d67f158..0fbac861080d 100644 --- a/drivers/power/max17040_battery.c +++ b/drivers/power/max17040_battery.c | |||
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client) | |||
148 | { | 148 | { |
149 | struct max17040_chip *chip = i2c_get_clientdata(client); | 149 | struct max17040_chip *chip = i2c_get_clientdata(client); |
150 | 150 | ||
151 | if (chip->pdata->battery_online) | 151 | if (chip->pdata && chip->pdata->battery_online) |
152 | chip->online = chip->pdata->battery_online(); | 152 | chip->online = chip->pdata->battery_online(); |
153 | else | 153 | else |
154 | chip->online = 1; | 154 | chip->online = 1; |
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client) | |||
158 | { | 158 | { |
159 | struct max17040_chip *chip = i2c_get_clientdata(client); | 159 | struct max17040_chip *chip = i2c_get_clientdata(client); |
160 | 160 | ||
161 | if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { | 161 | if (!chip->pdata || !chip->pdata->charger_online |
162 | || !chip->pdata->charger_enable) { | ||
162 | chip->status = POWER_SUPPLY_STATUS_UNKNOWN; | 163 | chip->status = POWER_SUPPLY_STATUS_UNKNOWN; |
163 | return; | 164 | return; |
164 | } | 165 | } |
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 7f340206d329..b14ebdad5dd2 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c | |||
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev) | |||
576 | /* Only LDO 5 and 6 has got the over current interrupt */ | 576 | /* Only LDO 5 and 6 has got the over current interrupt */ |
577 | if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { | 577 | if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) { |
578 | irq = platform_get_irq_byname(pdev, "REGULATOR"); | 578 | irq = platform_get_irq_byname(pdev, "REGULATOR"); |
579 | irq = regmap_irq_get_virq(da9055->irq_data, irq); | 579 | if (irq < 0) |
580 | return irq; | ||
581 | |||
580 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, | 582 | ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, |
581 | da9055_ldo5_6_oc_irq, | 583 | da9055_ldo5_6_oc_irq, |
582 | IRQF_TRIGGER_HIGH | | 584 | IRQF_TRIGGER_HIGH | |
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c index b1078ba3f393..186df8785a91 100644 --- a/drivers/regulator/max14577.c +++ b/drivers/regulator/max14577.c | |||
@@ -168,10 +168,11 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev) | |||
168 | MAX14577_REG_MAX); | 168 | MAX14577_REG_MAX); |
169 | if (ret < 0) { | 169 | if (ret < 0) { |
170 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); | 170 | dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret); |
171 | return ret; | ||
172 | } | 171 | } |
173 | 172 | ||
174 | return 0; | 173 | of_node_put(np); |
174 | |||
175 | return ret; | ||
175 | } | 176 | } |
176 | 177 | ||
177 | static inline struct regulator_init_data *match_init_data(int index) | 178 | static inline struct regulator_init_data *match_init_data(int index) |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 9e80d61e5a3a..2eb97d7e8d12 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -2595,8 +2595,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | |||
2595 | return -ENOMEM; | 2595 | return -ENOMEM; |
2596 | } | 2596 | } |
2597 | 2597 | ||
2598 | INIT_LIST_HEAD(&cmd->cmd_list); | ||
2599 | |||
2600 | memcpy(&cmd->atio, atio, sizeof(*atio)); | 2598 | memcpy(&cmd->atio, atio, sizeof(*atio)); |
2601 | cmd->state = QLA_TGT_STATE_NEW; | 2599 | cmd->state = QLA_TGT_STATE_NEW; |
2602 | cmd->tgt = vha->vha_tgt.qla_tgt; | 2600 | cmd->tgt = vha->vha_tgt.qla_tgt; |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 1d10eecad499..66e755cdde57 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
@@ -855,7 +855,6 @@ struct qla_tgt_cmd { | |||
855 | uint16_t loop_id; /* to save extra sess dereferences */ | 855 | uint16_t loop_id; /* to save extra sess dereferences */ |
856 | struct qla_tgt *tgt; /* to save extra sess dereferences */ | 856 | struct qla_tgt *tgt; /* to save extra sess dereferences */ |
857 | struct scsi_qla_host *vha; | 857 | struct scsi_qla_host *vha; |
858 | struct list_head cmd_list; | ||
859 | 858 | ||
860 | struct atio_from_isp atio; | 859 | struct atio_from_isp atio; |
861 | }; | 860 | }; |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ba9310bc9acb..581ee2a8856b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI | |||
376 | def_tristate SPI_PXA2XX && PCI | 376 | def_tristate SPI_PXA2XX && PCI |
377 | 377 | ||
378 | config SPI_RSPI | 378 | config SPI_RSPI |
379 | tristate "Renesas RSPI controller" | 379 | tristate "Renesas RSPI/QSPI controller" |
380 | depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE | 380 | depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE |
381 | help | 381 | help |
382 | SPI driver for Renesas RSPI blocks. | 382 | SPI driver for Renesas RSPI and QSPI blocks. |
383 | 383 | ||
384 | config SPI_S3C24XX | 384 | config SPI_S3C24XX |
385 | tristate "Samsung S3C24XX series SPI" | 385 | tristate "Samsung S3C24XX series SPI" |
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index 50406306bc20..bae97ffec4b9 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c | |||
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev) | |||
361 | init_completion(&hw->done); | 361 | init_completion(&hw->done); |
362 | 362 | ||
363 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 363 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
364 | if (hw->pdata->lsb) | ||
365 | master->mode_bits |= SPI_LSB_FIRST; | ||
364 | master->num_chipselect = hw->pdata->num_cs; | 366 | master->num_chipselect = hw->pdata->num_cs; |
365 | master->bus_num = hw->pdata->bus_num; | 367 | master->bus_num = hw->pdata->bus_num; |
366 | hw->bitbang.master = hw->master; | 368 | hw->bitbang.master = hw->master; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 23756b0f9036..d0b28bba38be 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work) | |||
755 | ret = master->transfer_one_message(master, master->cur_msg); | 755 | ret = master->transfer_one_message(master, master->cur_msg); |
756 | if (ret) { | 756 | if (ret) { |
757 | dev_err(&master->dev, | 757 | dev_err(&master->dev, |
758 | "failed to transfer one message from queue: %d\n", ret); | 758 | "failed to transfer one message from queue\n"); |
759 | master->cur_msg->status = ret; | ||
760 | spi_finalize_current_message(master); | ||
761 | return; | 759 | return; |
762 | } | 760 | } |
763 | } | 761 | } |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 23948f167012..713a97226787 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, | |||
295 | 295 | ||
296 | /* If size is not set, or set to 0, always return EOF. */ | 296 | /* If size is not set, or set to 0, always return EOF. */ |
297 | if (asma->size == 0) | 297 | if (asma->size == 0) |
298 | goto out; | 298 | goto out_unlock; |
299 | 299 | ||
300 | if (!asma->file) { | 300 | if (!asma->file) { |
301 | ret = -EBADF; | 301 | ret = -EBADF; |
302 | goto out; | 302 | goto out_unlock; |
303 | } | 303 | } |
304 | 304 | ||
305 | ret = asma->file->f_op->read(asma->file, buf, len, pos); | 305 | mutex_unlock(&ashmem_mutex); |
306 | if (ret < 0) | ||
307 | goto out; | ||
308 | 306 | ||
309 | /** Update backing file pos, since f_ops->read() doesn't */ | 307 | /* |
310 | asma->file->f_pos = *pos; | 308 | * asma and asma->file are used outside the lock here. We assume |
309 | * once asma->file is set it will never be changed, and will not | ||
310 | * be destroyed until all references to the file are dropped and | ||
311 | * ashmem_release is called. | ||
312 | */ | ||
313 | ret = asma->file->f_op->read(asma->file, buf, len, pos); | ||
314 | if (ret >= 0) { | ||
315 | /** Update backing file pos, since f_ops->read() doesn't */ | ||
316 | asma->file->f_pos = *pos; | ||
317 | } | ||
318 | return ret; | ||
311 | 319 | ||
312 | out: | 320 | out_unlock: |
313 | mutex_unlock(&ashmem_mutex); | 321 | mutex_unlock(&ashmem_mutex); |
314 | return ret; | 322 | return ret; |
315 | } | 323 | } |
@@ -498,6 +506,7 @@ out: | |||
498 | 506 | ||
499 | static int set_name(struct ashmem_area *asma, void __user *name) | 507 | static int set_name(struct ashmem_area *asma, void __user *name) |
500 | { | 508 | { |
509 | int len; | ||
501 | int ret = 0; | 510 | int ret = 0; |
502 | char local_name[ASHMEM_NAME_LEN]; | 511 | char local_name[ASHMEM_NAME_LEN]; |
503 | 512 | ||
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name) | |||
510 | * variable that does not need protection and later copy the local | 519 | * variable that does not need protection and later copy the local |
511 | * variable to the structure member with lock held. | 520 | * variable to the structure member with lock held. |
512 | */ | 521 | */ |
513 | if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) | 522 | len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); |
514 | return -EFAULT; | 523 | if (len < 0) |
515 | 524 | return len; | |
525 | if (len == ASHMEM_NAME_LEN) | ||
526 | local_name[ASHMEM_NAME_LEN - 1] = '\0'; | ||
516 | mutex_lock(&ashmem_mutex); | 527 | mutex_lock(&ashmem_mutex); |
517 | /* cannot change an existing mapping's name */ | 528 | /* cannot change an existing mapping's name */ |
518 | if (unlikely(asma->file)) { | 529 | if (unlikely(asma->file)) |
519 | ret = -EINVAL; | 530 | ret = -EINVAL; |
520 | goto out; | 531 | else |
521 | } | 532 | strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); |
522 | memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, | ||
523 | local_name, ASHMEM_NAME_LEN); | ||
524 | asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; | ||
525 | out: | ||
526 | mutex_unlock(&ashmem_mutex); | ||
527 | 533 | ||
534 | mutex_unlock(&ashmem_mutex); | ||
528 | return ret; | 535 | return ret; |
529 | } | 536 | } |
530 | 537 | ||
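The strncpy_from_user() contract the new set_name() relies on, as a sketch with a hypothetical buffer and user pointer: it returns the copied length on success, a negative errno on fault, and exactly the buffer size when the source did not fit, in which case the result is not NUL-terminated -- hence the forced terminator when len == ASHMEM_NAME_LEN.

#include <linux/uaccess.h>	/* strncpy_from_user() */

static long copy_name(char *buf, size_t bufsz, const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, bufsz);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == bufsz)
		buf[bufsz - 1] = '\0';	/* source was truncated, force NUL */
	return len;
}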
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c index af6cd370b30f..ee3a7380e53b 100644 --- a/drivers/staging/android/ion/compat_ion.c +++ b/drivers/staging/android/ion/compat_ion.c | |||
@@ -35,9 +35,14 @@ struct compat_ion_custom_data { | |||
35 | compat_ulong_t arg; | 35 | compat_ulong_t arg; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct compat_ion_handle_data { | ||
39 | compat_int_t handle; | ||
40 | }; | ||
41 | |||
38 | #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ | 42 | #define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ |
39 | struct compat_ion_allocation_data) | 43 | struct compat_ion_allocation_data) |
40 | #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) | 44 | #define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ |
45 | struct compat_ion_handle_data) | ||
41 | #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ | 46 | #define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ |
42 | struct compat_ion_custom_data) | 47 | struct compat_ion_custom_data) |
43 | 48 | ||
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data( | |||
64 | return err; | 69 | return err; |
65 | } | 70 | } |
66 | 71 | ||
72 | static int compat_get_ion_handle_data( | ||
73 | struct compat_ion_handle_data __user *data32, | ||
74 | struct ion_handle_data __user *data) | ||
75 | { | ||
76 | compat_int_t i; | ||
77 | int err; | ||
78 | |||
79 | err = get_user(i, &data32->handle); | ||
80 | err |= put_user(i, &data->handle); | ||
81 | |||
82 | return err; | ||
83 | } | ||
84 | |||
67 | static int compat_put_ion_allocation_data( | 85 | static int compat_put_ion_allocation_data( |
68 | struct compat_ion_allocation_data __user *data32, | 86 | struct compat_ion_allocation_data __user *data32, |
69 | struct ion_allocation_data __user *data) | 87 | struct ion_allocation_data __user *data) |
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
132 | } | 150 | } |
133 | case COMPAT_ION_IOC_FREE: | 151 | case COMPAT_ION_IOC_FREE: |
134 | { | 152 | { |
135 | struct compat_ion_allocation_data __user *data32; | 153 | struct compat_ion_handle_data __user *data32; |
136 | struct ion_allocation_data __user *data; | 154 | struct ion_handle_data __user *data; |
137 | int err; | 155 | int err; |
138 | 156 | ||
139 | data32 = compat_ptr(arg); | 157 | data32 = compat_ptr(arg); |
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
141 | if (data == NULL) | 159 | if (data == NULL) |
142 | return -EFAULT; | 160 | return -EFAULT; |
143 | 161 | ||
144 | err = compat_get_ion_allocation_data(data32, data); | 162 | err = compat_get_ion_handle_data(data32, data); |
145 | if (err) | 163 | if (err) |
146 | return err; | 164 | return err; |
147 | 165 | ||
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c index 55b2002753f2..01cdc8aee898 100644 --- a/drivers/staging/android/ion/ion_dummy_driver.c +++ b/drivers/staging/android/ion/ion_dummy_driver.c | |||
@@ -17,9 +17,11 @@ | |||
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/init.h> | ||
20 | #include <linux/bootmem.h> | 21 | #include <linux/bootmem.h> |
21 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
22 | #include <linux/sizes.h> | 23 | #include <linux/sizes.h> |
24 | #include <linux/io.h> | ||
23 | #include "ion.h" | 25 | #include "ion.h" |
24 | #include "ion_priv.h" | 26 | #include "ion_priv.h" |
25 | 27 | ||
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = { | |||
57 | }; | 59 | }; |
58 | 60 | ||
59 | struct ion_platform_data dummy_ion_pdata = { | 61 | struct ion_platform_data dummy_ion_pdata = { |
60 | .nr = 4, | 62 | .nr = ARRAY_SIZE(dummy_heaps), |
61 | .heaps = dummy_heaps, | 63 | .heaps = dummy_heaps, |
62 | }; | 64 | }; |
63 | 65 | ||
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void) | |||
69 | heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, | 71 | heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, |
70 | GFP_KERNEL); | 72 | GFP_KERNEL); |
71 | if (!heaps) | 73 | if (!heaps) |
72 | return PTR_ERR(heaps); | 74 | return -ENOMEM; |
73 | 75 | ||
74 | 76 | ||
75 | /* Allocate a dummy carveout heap */ | 77 | /* Allocate a dummy carveout heap */ |
@@ -128,6 +130,7 @@ err: | |||
128 | } | 130 | } |
129 | return err; | 131 | return err; |
130 | } | 132 | } |
133 | device_initcall(ion_dummy_init); | ||
131 | 134 | ||
132 | static void __exit ion_dummy_exit(void) | 135 | static void __exit ion_dummy_exit(void) |
133 | { | 136 | { |
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void) | |||
152 | 155 | ||
153 | return; | 156 | return; |
154 | } | 157 | } |
155 | 158 | __exitcall(ion_dummy_exit); | |
156 | module_init(ion_dummy_init); | ||
157 | module_exit(ion_dummy_exit); | ||
158 | |||
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 296c74f98dc0..37e64d51394c 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c | |||
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap) | |||
243 | init_waitqueue_head(&heap->waitqueue); | 243 | init_waitqueue_head(&heap->waitqueue); |
244 | heap->task = kthread_run(ion_heap_deferred_free, heap, | 244 | heap->task = kthread_run(ion_heap_deferred_free, heap, |
245 | "%s", heap->name); | 245 | "%s", heap->name); |
246 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); | ||
247 | if (IS_ERR(heap->task)) { | 246 | if (IS_ERR(heap->task)) { |
248 | pr_err("%s: creating thread for deferred free failed\n", | 247 | pr_err("%s: creating thread for deferred free failed\n", |
249 | __func__); | 248 | __func__); |
250 | return PTR_RET(heap->task); | 249 | return PTR_RET(heap->task); |
251 | } | 250 | } |
251 | sched_setscheduler(heap->task, SCHED_IDLE, ¶m); | ||
252 | return 0; | 252 | return 0; |
253 | } | 253 | } |
254 | 254 | ||
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index d98673981cc4..fc2e4fccf69d 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #ifndef _ION_PRIV_H | 17 | #ifndef _ION_PRIV_H |
18 | #define _ION_PRIV_H | 18 | #define _ION_PRIV_H |
19 | 19 | ||
20 | #include <linux/device.h> | ||
20 | #include <linux/dma-direction.h> | 21 | #include <linux/dma-direction.h> |
21 | #include <linux/kref.h> | 22 | #include <linux/kref.h> |
22 | #include <linux/mm_types.h> | 23 | #include <linux/mm_types.h> |
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 7f0729130d65..9849f3963e75 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c | |||
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap, | |||
124 | 124 | ||
125 | info->page = page; | 125 | info->page = page; |
126 | info->order = orders[i]; | 126 | info->order = orders[i]; |
127 | INIT_LIST_HEAD(&info->list); | ||
127 | return info; | 128 | return info; |
128 | } | 129 | } |
129 | kfree(info); | 130 | kfree(info); |
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap, | |||
145 | struct list_head pages; | 146 | struct list_head pages; |
146 | struct page_info *info, *tmp_info; | 147 | struct page_info *info, *tmp_info; |
147 | int i = 0; | 148 | int i = 0; |
148 | long size_remaining = PAGE_ALIGN(size); | 149 | unsigned long size_remaining = PAGE_ALIGN(size); |
149 | unsigned int max_order = orders[0]; | 150 | unsigned int max_order = orders[0]; |
150 | 151 | ||
151 | if (align > PAGE_SIZE) | 152 | if (align > PAGE_SIZE) |
152 | return -EINVAL; | 153 | return -EINVAL; |
153 | 154 | ||
155 | if (size / PAGE_SIZE > totalram_pages / 2) | ||
156 | return -ENOMEM; | ||
157 | |||
154 | INIT_LIST_HEAD(&pages); | 158 | INIT_LIST_HEAD(&pages); |
155 | while (size_remaining > 0) { | 159 | while (size_remaining > 0) { |
156 | info = alloc_largest_available(sys_heap, buffer, size_remaining, | 160 | info = alloc_largest_available(sys_heap, buffer, size_remaining, |
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f18..5aaf71d6974b 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h | |||
@@ -35,10 +35,27 @@ struct sw_sync_pt { | |||
35 | u32 value; | 35 | u32 value; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #if IS_ENABLED(CONFIG_SW_SYNC) | ||
38 | struct sw_sync_timeline *sw_sync_timeline_create(const char *name); | 39 | struct sw_sync_timeline *sw_sync_timeline_create(const char *name); |
39 | void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); | 40 | void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); |
40 | 41 | ||
41 | struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); | 42 | struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); |
43 | #else | ||
44 | static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) | ||
45 | { | ||
46 | return NULL; | ||
47 | } | ||
48 | |||
49 | static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, | ||
54 | u32 value) | ||
55 | { | ||
56 | return NULL; | ||
57 | } | ||
58 | #endif /* IS_ENABLED(CONFIG_SW_SYNC) */ | ||
42 | 59 | ||
43 | #endif /* __KERNEL __ */ | 60 | #endif /* __KERNEL __ */ |
44 | 61 | ||
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 38e5d3b5ed9b..3d05f662110b 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c | |||
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref) | |||
79 | container_of(kref, struct sync_timeline, kref); | 79 | container_of(kref, struct sync_timeline, kref); |
80 | unsigned long flags; | 80 | unsigned long flags; |
81 | 81 | ||
82 | if (obj->ops->release_obj) | ||
83 | obj->ops->release_obj(obj); | ||
84 | |||
85 | spin_lock_irqsave(&sync_timeline_list_lock, flags); | 82 | spin_lock_irqsave(&sync_timeline_list_lock, flags); |
86 | list_del(&obj->sync_timeline_list); | 83 | list_del(&obj->sync_timeline_list); |
87 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); | 84 | spin_unlock_irqrestore(&sync_timeline_list_lock, flags); |
88 | 85 | ||
86 | if (obj->ops->release_obj) | ||
87 | obj->ops->release_obj(obj); | ||
88 | |||
89 | kfree(obj); | 89 | kfree(obj); |
90 | } | 90 | } |
91 | 91 | ||
92 | void sync_timeline_destroy(struct sync_timeline *obj) | 92 | void sync_timeline_destroy(struct sync_timeline *obj) |
93 | { | 93 | { |
94 | obj->destroyed = true; | 94 | obj->destroyed = true; |
95 | smp_wmb(); | ||
95 | 96 | ||
96 | /* | 97 | /* |
97 | * If this is not the last reference, signal any children | 98 | * signal any children that their parent is going away. |
98 | * that their parent is going away. | ||
99 | */ | 99 | */ |
100 | sync_timeline_signal(obj); | ||
100 | 101 | ||
101 | if (!kref_put(&obj->kref, sync_timeline_free)) | 102 | kref_put(&obj->kref, sync_timeline_free); |
102 | sync_timeline_signal(obj); | ||
103 | } | 103 | } |
104 | EXPORT_SYMBOL(sync_timeline_destroy); | 104 | EXPORT_SYMBOL(sync_timeline_destroy); |
105 | 105 | ||
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 246080316c90..5b15033a94bf 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device, | |||
616 | ret = driver->auto_attach(dev, context); | 616 | ret = driver->auto_attach(dev, context); |
617 | if (ret >= 0) | 617 | if (ret >= 0) |
618 | ret = comedi_device_postconfig(dev); | 618 | ret = comedi_device_postconfig(dev); |
619 | if (ret < 0) | ||
620 | comedi_device_detach(dev); | ||
621 | mutex_unlock(&dev->mutex); | 619 | mutex_unlock(&dev->mutex); |
622 | 620 | ||
623 | if (ret < 0) { | 621 | if (ret < 0) { |
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 593676cf706a..d9ad2c0fdda2 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c | |||
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, | |||
494 | struct comedi_insn *insn, unsigned int *data) | 494 | struct comedi_insn *insn, unsigned int *data) |
495 | { | 495 | { |
496 | struct pci1710_private *devpriv = dev->private; | 496 | struct pci1710_private *devpriv = dev->private; |
497 | unsigned int val; | ||
497 | int n, chan, range, ofs; | 498 | int n, chan, range, ofs; |
498 | 499 | ||
499 | chan = CR_CHAN(insn->chanspec); | 500 | chan = CR_CHAN(insn->chanspec); |
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev, | |||
509 | outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); | 510 | outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); |
510 | ofs = PCI171x_DA1; | 511 | ofs = PCI171x_DA1; |
511 | } | 512 | } |
513 | val = devpriv->ao_data[chan]; | ||
512 | 514 | ||
513 | for (n = 0; n < insn->n; n++) | 515 | for (n = 0; n < insn->n; n++) { |
514 | outw(data[n], dev->iobase + ofs); | 516 | val = data[n]; |
517 | outw(val, dev->iobase + ofs); | ||
518 | } | ||
515 | 519 | ||
516 | devpriv->ao_data[chan] = data[n]; | 520 | devpriv->ao_data[chan] = val; |
517 | 521 | ||
518 | return n; | 522 | return n; |
519 | 523 | ||
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, | |||
679 | struct comedi_insn *insn, unsigned int *data) | 683 | struct comedi_insn *insn, unsigned int *data) |
680 | { | 684 | { |
681 | struct pci1710_private *devpriv = dev->private; | 685 | struct pci1710_private *devpriv = dev->private; |
686 | unsigned int val; | ||
682 | int n, rangereg, chan; | 687 | int n, rangereg, chan; |
683 | 688 | ||
684 | chan = CR_CHAN(insn->chanspec); | 689 | chan = CR_CHAN(insn->chanspec); |
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev, | |||
688 | outb(rangereg, dev->iobase + PCI1720_RANGE); | 693 | outb(rangereg, dev->iobase + PCI1720_RANGE); |
689 | devpriv->da_ranges = rangereg; | 694 | devpriv->da_ranges = rangereg; |
690 | } | 695 | } |
696 | val = devpriv->ao_data[chan]; | ||
691 | 697 | ||
692 | for (n = 0; n < insn->n; n++) { | 698 | for (n = 0; n < insn->n; n++) { |
693 | outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1)); | 699 | val = data[n]; |
700 | outw(val, dev->iobase + PCI1720_DA0 + (chan << 1)); | ||
694 | outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ | 701 | outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ |
695 | } | 702 | } |
696 | 703 | ||
697 | devpriv->ao_data[chan] = data[n]; | 704 | devpriv->ao_data[chan] = val; |
698 | 705 | ||
699 | return n; | 706 | return n; |
700 | } | 707 | } |
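
The pci1710 hunks above cache the last value written to a channel in a local variable and update the readback store from that copy after the loop, instead of indexing data[n] once the loop has ended. A minimal sketch of the idiom; the names (hypo_ao_state, hypo_reg_write) are assumptions for illustration, not real comedi or adv_pci1710 APIs:

struct hypo_ao_state {
	unsigned int last_val[8];	/* per-channel readback cache */
};

/* Assumed stand-in for the hardware register write. */
static void hypo_reg_write(unsigned int chan, unsigned int val)
{
}

static int hypo_ao_write(struct hypo_ao_state *st, unsigned int chan,
			 const unsigned int *data, unsigned int n_samples)
{
	/* Start from the cached value so an empty write leaves it unchanged. */
	unsigned int val = st->last_val[chan];
	unsigned int n;

	for (n = 0; n < n_samples; n++) {
		val = data[n];
		hypo_reg_write(chan, val);
	}

	st->last_val[chan] = val;	/* never reads past the data[] array */
	return n;
}
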
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index 3beeb1254152..88c60b6020c4 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/usb.h> | 48 | #include <linux/usb.h> |
49 | #include <linux/fcntl.h> | 49 | #include <linux/fcntl.h> |
50 | #include <linux/compiler.h> | 50 | #include <linux/compiler.h> |
51 | #include <asm/unaligned.h> | ||
51 | 52 | ||
52 | #include "comedi_fc.h" | 53 | #include "comedi_fc.h" |
53 | #include "../comedidev.h" | 54 | #include "../comedidev.h" |
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev, | |||
792 | } | 793 | } |
793 | 794 | ||
794 | /* 32 bits big endian from the A/D converter */ | 795 | /* 32 bits big endian from the A/D converter */ |
795 | val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1))); | 796 | val = be32_to_cpu(get_unaligned((uint32_t |
797 | *)(devpriv->insn_buf + 1))); | ||
796 | val &= 0x00ffffff; /* strip status byte */ | 798 | val &= 0x00ffffff; /* strip status byte */ |
797 | val ^= 0x00800000; /* convert to unsigned */ | 799 | val ^= 0x00800000; /* convert to unsigned */ |
798 | 800 | ||
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan) | |||
1357 | return ret; | 1359 | return ret; |
1358 | 1360 | ||
1359 | /* 32 bits big endian from the A/D converter */ | 1361 | /* 32 bits big endian from the A/D converter */ |
1360 | val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1))); | 1362 | val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1))); |
1361 | val &= 0x00ffffff; /* strip status byte */ | 1363 | val &= 0x00ffffff; /* strip status byte */ |
1362 | val ^= 0x00800000; /* convert to unsigned */ | 1364 | val ^= 0x00800000; /* convert to unsigned */ |
1363 | 1365 | ||
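
The usbduxsigma hunks above read a 32-bit big-endian word that starts one byte into the URB buffer, so the pointer is not naturally aligned; get_unaligned() makes that load safe on architectures that fault on unaligned accesses. A small sketch of the pattern, with the buffer layout assumed for illustration only:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Extract a 24-bit offset-binary sample whose 32-bit big-endian word
 * begins at an odd (unaligned) offset in the buffer. */
static u32 sample_from_buf(const u8 *buf)
{
	u32 val;

	val = be32_to_cpu(get_unaligned((__be32 *)(buf + 1)));
	val &= 0x00ffffff;	/* strip the status byte */
	val ^= 0x00800000;	/* offset binary -> unsigned */
	return val;
}

(The combined helper get_unaligned_be32() would express the same load in a single call.)
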
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c index 1f61b89eca44..33ac7fb88cbd 100644 --- a/drivers/staging/dgrp/dgrp_net_ops.c +++ b/drivers/staging/dgrp/dgrp_net_ops.c | |||
@@ -2232,177 +2232,6 @@ done: | |||
2232 | return rtn; | 2232 | return rtn; |
2233 | } | 2233 | } |
2234 | 2234 | ||
2235 | /* | ||
2236 | * Common Packet Handling code | ||
2237 | */ | ||
2238 | |||
2239 | static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch, | ||
2240 | long dlen, long plen, int n1, u8 *dbuf) | ||
2241 | { | ||
2242 | char *error; | ||
2243 | long n; | ||
2244 | long remain; | ||
2245 | u8 *buf; | ||
2246 | u8 *b; | ||
2247 | |||
2248 | remain = nd->nd_remain; | ||
2249 | nd->nd_tx_work = 1; | ||
2250 | |||
2251 | /* | ||
2252 | * Otherwise data should appear only when we are | ||
2253 | * in the CS_READY state. | ||
2254 | */ | ||
2255 | |||
2256 | if (ch->ch_state < CS_READY) { | ||
2257 | error = "Data received before RWIN established"; | ||
2258 | nd->nd_remain = 0; | ||
2259 | nd->nd_state = NS_SEND_ERROR; | ||
2260 | nd->nd_error = error; | ||
2261 | } | ||
2262 | |||
2263 | /* | ||
2264 | * Assure that the data received is within the | ||
2265 | * allowable window. | ||
2266 | */ | ||
2267 | |||
2268 | n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; | ||
2269 | |||
2270 | if (dlen > n) { | ||
2271 | error = "Receive data overrun"; | ||
2272 | nd->nd_remain = 0; | ||
2273 | nd->nd_state = NS_SEND_ERROR; | ||
2274 | nd->nd_error = error; | ||
2275 | } | ||
2276 | |||
2277 | /* | ||
2278 | * If we received 3 or less characters, | ||
2279 | * assume it is a human typing, and set RTIME | ||
2280 | * to 10 milliseconds. | ||
2281 | * | ||
2282 | * If we receive 10 or more characters, | ||
2283 | * assume its not a human typing, and set RTIME | ||
2284 | * to 100 milliseconds. | ||
2285 | */ | ||
2286 | |||
2287 | if (ch->ch_edelay != DGRP_RTIME) { | ||
2288 | if (ch->ch_rtime != ch->ch_edelay) { | ||
2289 | ch->ch_rtime = ch->ch_edelay; | ||
2290 | ch->ch_flag |= CH_PARAM; | ||
2291 | } | ||
2292 | } else if (dlen <= 3) { | ||
2293 | if (ch->ch_rtime != 10) { | ||
2294 | ch->ch_rtime = 10; | ||
2295 | ch->ch_flag |= CH_PARAM; | ||
2296 | } | ||
2297 | } else { | ||
2298 | if (ch->ch_rtime != DGRP_RTIME) { | ||
2299 | ch->ch_rtime = DGRP_RTIME; | ||
2300 | ch->ch_flag |= CH_PARAM; | ||
2301 | } | ||
2302 | } | ||
2303 | |||
2304 | /* | ||
2305 | * If a portion of the packet is outside the | ||
2306 | * buffer, shorten the effective length of the | ||
2307 | * data packet to be the amount of data received. | ||
2308 | */ | ||
2309 | |||
2310 | if (remain < plen) | ||
2311 | dlen -= plen - remain; | ||
2312 | |||
2313 | /* | ||
2314 | * Detect if receive flush is now complete. | ||
2315 | */ | ||
2316 | |||
2317 | if ((ch->ch_flag & CH_RX_FLUSH) != 0 && | ||
2318 | ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= | ||
2319 | ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { | ||
2320 | ch->ch_flag &= ~CH_RX_FLUSH; | ||
2321 | } | ||
2322 | |||
2323 | /* | ||
2324 | * If we are ready to receive, move the data into | ||
2325 | * the receive buffer. | ||
2326 | */ | ||
2327 | |||
2328 | ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; | ||
2329 | |||
2330 | if (ch->ch_state == CS_READY && | ||
2331 | (ch->ch_tun.un_open_count != 0) && | ||
2332 | (ch->ch_tun.un_flag & UN_CLOSING) == 0 && | ||
2333 | (ch->ch_cflag & CF_CREAD) != 0 && | ||
2334 | (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && | ||
2335 | (ch->ch_send & RR_RX_FLUSH) == 0) { | ||
2336 | |||
2337 | if (ch->ch_rin + dlen >= RBUF_MAX) { | ||
2338 | n = RBUF_MAX - ch->ch_rin; | ||
2339 | |||
2340 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); | ||
2341 | |||
2342 | ch->ch_rin = 0; | ||
2343 | dbuf += n; | ||
2344 | dlen -= n; | ||
2345 | } | ||
2346 | |||
2347 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); | ||
2348 | |||
2349 | ch->ch_rin += dlen; | ||
2350 | |||
2351 | |||
2352 | /* | ||
2353 | * If we are not in fastcook mode, or | ||
2354 | * if there is a fastcook thread | ||
2355 | * waiting for data, send the data to | ||
2356 | * the line discipline. | ||
2357 | */ | ||
2358 | |||
2359 | if ((ch->ch_flag & CH_FAST_READ) == 0 || | ||
2360 | ch->ch_inwait != 0) { | ||
2361 | dgrp_input(ch); | ||
2362 | } | ||
2363 | |||
2364 | /* | ||
2365 | * If there is a read thread waiting | ||
2366 | * in select, and we are in fastcook | ||
2367 | * mode, wake him up. | ||
2368 | */ | ||
2369 | |||
2370 | if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && | ||
2371 | (ch->ch_flag & CH_FAST_READ) != 0) | ||
2372 | wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); | ||
2373 | |||
2374 | /* | ||
2375 | * Wake any thread waiting in the | ||
2376 | * fastcook loop. | ||
2377 | */ | ||
2378 | |||
2379 | if ((ch->ch_flag & CH_INPUT) != 0) { | ||
2380 | ch->ch_flag &= ~CH_INPUT; | ||
2381 | wake_up_interruptible(&ch->ch_flag_wait); | ||
2382 | } | ||
2383 | } | ||
2384 | |||
2385 | /* | ||
2386 | * Fabricate and insert a data packet header to | ||
2387 | * preced the remaining data when it comes in. | ||
2388 | */ | ||
2389 | |||
2390 | if (remain < plen) { | ||
2391 | dlen = plen - remain; | ||
2392 | b = buf; | ||
2393 | |||
2394 | b[0] = 0x90 + n1; | ||
2395 | put_unaligned_be16(dlen, b + 1); | ||
2396 | |||
2397 | remain = 3; | ||
2398 | if (remain > 0 && b != buf) | ||
2399 | memcpy(buf, b, remain); | ||
2400 | |||
2401 | nd->nd_remain = remain; | ||
2402 | return; | ||
2403 | } | ||
2404 | } | ||
2405 | |||
2406 | /** | 2235 | /** |
2407 | * dgrp_receive() -- decode data packets received from the remote PortServer. | 2236 | * dgrp_receive() -- decode data packets received from the remote PortServer. |
2408 | * @nd: pointer to a node structure | 2237 | * @nd: pointer to a node structure |
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2477 | plen = dlen + 1; | 2306 | plen = dlen + 1; |
2478 | 2307 | ||
2479 | dbuf = b + 1; | 2308 | dbuf = b + 1; |
2480 | handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); | 2309 | goto data; |
2481 | break; | ||
2482 | 2310 | ||
2483 | /* | 2311 | /* |
2484 | * Process 2-byte header data packet. | 2312 | * Process 2-byte header data packet. |
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2492 | plen = dlen + 2; | 2320 | plen = dlen + 2; |
2493 | 2321 | ||
2494 | dbuf = b + 2; | 2322 | dbuf = b + 2; |
2495 | handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf); | 2323 | goto data; |
2496 | break; | ||
2497 | 2324 | ||
2498 | /* | 2325 | /* |
2499 | * Process 3-byte header data packet. | 2326 | * Process 3-byte header data packet. |
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd) | |||
2508 | 2335 | ||
2509 | dbuf = b + 3; | 2336 | dbuf = b + 3; |
2510 | 2337 | ||
2338 | /* | ||
2339 | * Common packet handling code. | ||
2340 | */ | ||
2341 | |||
2342 | data: | ||
2343 | nd->nd_tx_work = 1; | ||
2344 | |||
2345 | /* | ||
2346 | * Otherwise data should appear only when we are | ||
2347 | * in the CS_READY state. | ||
2348 | */ | ||
2349 | |||
2350 | if (ch->ch_state < CS_READY) { | ||
2351 | error = "Data received before RWIN established"; | ||
2352 | goto prot_error; | ||
2353 | } | ||
2354 | |||
2355 | /* | ||
2356 | * Assure that the data received is within the | ||
2357 | * allowable window. | ||
2358 | */ | ||
2359 | |||
2360 | n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff; | ||
2361 | |||
2362 | if (dlen > n) { | ||
2363 | error = "Receive data overrun"; | ||
2364 | goto prot_error; | ||
2365 | } | ||
2366 | |||
2367 | /* | ||
2368 | * If we received 3 or less characters, | ||
2369 | * assume it is a human typing, and set RTIME | ||
2370 | * to 10 milliseconds. | ||
2371 | * | ||
2372 | * If we receive 10 or more characters, | ||
2373 | * assume its not a human typing, and set RTIME | ||
2374 | * to 100 milliseconds. | ||
2375 | */ | ||
2376 | |||
2377 | if (ch->ch_edelay != DGRP_RTIME) { | ||
2378 | if (ch->ch_rtime != ch->ch_edelay) { | ||
2379 | ch->ch_rtime = ch->ch_edelay; | ||
2380 | ch->ch_flag |= CH_PARAM; | ||
2381 | } | ||
2382 | } else if (dlen <= 3) { | ||
2383 | if (ch->ch_rtime != 10) { | ||
2384 | ch->ch_rtime = 10; | ||
2385 | ch->ch_flag |= CH_PARAM; | ||
2386 | } | ||
2387 | } else { | ||
2388 | if (ch->ch_rtime != DGRP_RTIME) { | ||
2389 | ch->ch_rtime = DGRP_RTIME; | ||
2390 | ch->ch_flag |= CH_PARAM; | ||
2391 | } | ||
2392 | } | ||
2393 | |||
2394 | /* | ||
2395 | * If a portion of the packet is outside the | ||
2396 | * buffer, shorten the effective length of the | ||
2397 | * data packet to be the amount of data received. | ||
2398 | */ | ||
2399 | |||
2400 | if (remain < plen) | ||
2401 | dlen -= plen - remain; | ||
2402 | |||
2403 | /* | ||
2404 | * Detect if receive flush is now complete. | ||
2405 | */ | ||
2406 | |||
2407 | if ((ch->ch_flag & CH_RX_FLUSH) != 0 && | ||
2408 | ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >= | ||
2409 | ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) { | ||
2410 | ch->ch_flag &= ~CH_RX_FLUSH; | ||
2411 | } | ||
2412 | |||
2413 | /* | ||
2414 | * If we are ready to receive, move the data into | ||
2415 | * the receive buffer. | ||
2416 | */ | ||
2417 | |||
2418 | ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff; | ||
2419 | |||
2420 | if (ch->ch_state == CS_READY && | ||
2421 | (ch->ch_tun.un_open_count != 0) && | ||
2422 | (ch->ch_tun.un_flag & UN_CLOSING) == 0 && | ||
2423 | (ch->ch_cflag & CF_CREAD) != 0 && | ||
2424 | (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 && | ||
2425 | (ch->ch_send & RR_RX_FLUSH) == 0) { | ||
2426 | |||
2427 | if (ch->ch_rin + dlen >= RBUF_MAX) { | ||
2428 | n = RBUF_MAX - ch->ch_rin; | ||
2429 | |||
2430 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n); | ||
2431 | |||
2432 | ch->ch_rin = 0; | ||
2433 | dbuf += n; | ||
2434 | dlen -= n; | ||
2435 | } | ||
2436 | |||
2437 | memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen); | ||
2438 | |||
2439 | ch->ch_rin += dlen; | ||
2440 | |||
2441 | |||
2442 | /* | ||
2443 | * If we are not in fastcook mode, or | ||
2444 | * if there is a fastcook thread | ||
2445 | * waiting for data, send the data to | ||
2446 | * the line discipline. | ||
2447 | */ | ||
2448 | |||
2449 | if ((ch->ch_flag & CH_FAST_READ) == 0 || | ||
2450 | ch->ch_inwait != 0) { | ||
2451 | dgrp_input(ch); | ||
2452 | } | ||
2453 | |||
2454 | /* | ||
2455 | * If there is a read thread waiting | ||
2456 | * in select, and we are in fastcook | ||
2457 | * mode, wake him up. | ||
2458 | */ | ||
2459 | |||
2460 | if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) && | ||
2461 | (ch->ch_flag & CH_FAST_READ) != 0) | ||
2462 | wake_up_interruptible(&ch->ch_tun.un_tty->read_wait); | ||
2463 | |||
2464 | /* | ||
2465 | * Wake any thread waiting in the | ||
2466 | * fastcook loop. | ||
2467 | */ | ||
2468 | |||
2469 | if ((ch->ch_flag & CH_INPUT) != 0) { | ||
2470 | ch->ch_flag &= ~CH_INPUT; | ||
2471 | |||
2472 | wake_up_interruptible(&ch->ch_flag_wait); | ||
2473 | } | ||
2474 | } | ||
2475 | |||
2476 | /* | ||
2477 | * Fabricate and insert a data packet header to | ||
2478 | * precede the remaining data when it comes in. | ||
2479 | */ | ||
2480 | |||
2481 | if (remain < plen) { | ||
2482 | dlen = plen - remain; | ||
2483 | b = buf; | ||
2484 | |||
2485 | b[0] = 0x90 + n1; | ||
2486 | put_unaligned_be16(dlen, b + 1); | ||
2487 | |||
2488 | remain = 3; | ||
2489 | goto done; | ||
2490 | } | ||
2511 | break; | 2491 | break; |
2512 | 2492 | ||
2513 | /* | 2493 | /* |
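
The open-coded block restored above ends by fabricating a replacement data-packet header for the bytes that have not yet arrived: one type byte carrying the channel number followed by a 16-bit big-endian length, written with put_unaligned_be16() because the header bytes are not aligned. The same three-byte layout in isolation; the constants mirror the code above, everything else is illustrative:

#include <linux/types.h>
#include <asm/unaligned.h>

static void build_data_header(u8 *hdr, unsigned int channel, u16 remaining)
{
	hdr[0] = 0x90 + channel;		/* data-packet type + channel */
	put_unaligned_be16(remaining, hdr + 1);	/* length, unaligned-safe */
}
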
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c index f8788bf0a7d3..cdeffe75496b 100644 --- a/drivers/staging/gdm72xx/gdm_usb.c +++ b/drivers/staging/gdm72xx/gdm_usb.c | |||
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf, | |||
635 | #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ | 635 | #endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ |
636 | 636 | ||
637 | ret = register_wimax_device(phy_dev, &intf->dev); | 637 | ret = register_wimax_device(phy_dev, &intf->dev); |
638 | if (ret) | ||
639 | release_usb(udev); | ||
638 | 640 | ||
639 | out: | 641 | out: |
640 | if (ret) { | 642 | if (ret) { |
641 | kfree(phy_dev); | 643 | kfree(phy_dev); |
642 | kfree(udev); | 644 | kfree(udev); |
645 | usb_put_dev(usbdev); | ||
643 | } else { | 646 | } else { |
644 | usb_set_intfdata(intf, phy_dev); | 647 | usb_set_intfdata(intf, phy_dev); |
645 | } | 648 | } |
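
The gdm_usb hunk above balances the reference taken with usb_get_dev() during probe by dropping it with usb_put_dev() on the error path. A hedged sketch of that probe shape; struct hypo_priv and register_hypo_device() are assumptions, not gdm72xx APIs:

#include <linux/usb.h>
#include <linux/slab.h>

struct hypo_priv {
	struct usb_device *usbdev;
};

/* Assumed stand-in for the higher-level registration step. */
static int register_hypo_device(struct hypo_priv *priv)
{
	return 0;
}

static int hypo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	struct usb_device *usbdev = usb_get_dev(interface_to_usbdev(intf));
	struct hypo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put;
	}
	priv->usbdev = usbdev;

	ret = register_hypo_device(priv);
	if (ret)
		goto err_free;

	usb_set_intfdata(intf, priv);
	return 0;

err_free:
	kfree(priv);
err_put:
	usb_put_dev(usbdev);	/* drop the reference taken above */
	return ret;
}
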
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h index 35154d60faf6..c9fedb79e3a2 100644 --- a/drivers/staging/iio/Documentation/iio_utils.h +++ b/drivers/staging/iio/Documentation/iio_utils.h | |||
@@ -77,7 +77,6 @@ struct iio_channel_info { | |||
77 | uint64_t mask; | 77 | uint64_t mask; |
78 | unsigned be; | 78 | unsigned be; |
79 | unsigned is_signed; | 79 | unsigned is_signed; |
80 | unsigned enabled; | ||
81 | unsigned location; | 80 | unsigned location; |
82 | }; | 81 | }; |
83 | 82 | ||
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir, | |||
335 | while (ent = readdir(dp), ent != NULL) { | 334 | while (ent = readdir(dp), ent != NULL) { |
336 | if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), | 335 | if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), |
337 | "_en") == 0) { | 336 | "_en") == 0) { |
337 | int current_enabled = 0; | ||
338 | current = &(*ci_array)[count++]; | 338 | current = &(*ci_array)[count++]; |
339 | ret = asprintf(&filename, | 339 | ret = asprintf(&filename, |
340 | "%s/%s", scan_el_dir, ent->d_name); | 340 | "%s/%s", scan_el_dir, ent->d_name); |
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir, | |||
350 | ret = -errno; | 350 | ret = -errno; |
351 | goto error_cleanup_array; | 351 | goto error_cleanup_array; |
352 | } | 352 | } |
353 | fscanf(sysfsfp, "%u", ¤t->enabled); | 353 | fscanf(sysfsfp, "%u", ¤t_enabled); |
354 | fclose(sysfsfp); | 354 | fclose(sysfsfp); |
355 | 355 | ||
356 | if (!current->enabled) { | 356 | if (!current_enabled) { |
357 | free(filename); | 357 | free(filename); |
358 | count--; | 358 | count--; |
359 | continue; | 359 | continue; |
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c index 5ea36410f716..5708ffc62aec 100644 --- a/drivers/staging/iio/adc/ad799x_core.c +++ b/drivers/staging/iio/adc/ad799x_core.c | |||
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = { | |||
393 | }, { | 393 | }, { |
394 | .type = IIO_EV_TYPE_THRESH, | 394 | .type = IIO_EV_TYPE_THRESH, |
395 | .dir = IIO_EV_DIR_FALLING, | 395 | .dir = IIO_EV_DIR_FALLING, |
396 | .mask_separate = BIT(IIO_EV_INFO_VALUE), | 396 | .mask_separate = BIT(IIO_EV_INFO_VALUE) | |
397 | BIT(IIO_EV_INFO_ENABLE), | 397 | BIT(IIO_EV_INFO_ENABLE), |
398 | }, { | 398 | }, { |
399 | .type = IIO_EV_TYPE_THRESH, | 399 | .type = IIO_EV_TYPE_THRESH, |
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = { | |||
409 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ | 409 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ |
410 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ | 410 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ |
411 | .scan_index = (_index), \ | 411 | .scan_index = (_index), \ |
412 | .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \ | 412 | .scan_type = { \ |
413 | .sign = 'u', \ | ||
414 | .realbits = (_realbits), \ | ||
415 | .storagebits = 16, \ | ||
416 | .shift = 12 - (_realbits), \ | ||
417 | .endianness = IIO_BE, \ | ||
418 | }, \ | ||
413 | .event_spec = _ev_spec, \ | 419 | .event_spec = _ev_spec, \ |
414 | .num_event_specs = _num_ev_spec, \ | 420 | .num_event_specs = _num_ev_spec, \ |
415 | } | 421 | } |
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client, | |||
588 | return 0; | 594 | return 0; |
589 | 595 | ||
590 | error_free_irq: | 596 | error_free_irq: |
591 | free_irq(client->irq, indio_dev); | 597 | if (client->irq > 0) |
598 | free_irq(client->irq, indio_dev); | ||
592 | error_cleanup_ring: | 599 | error_cleanup_ring: |
593 | ad799x_ring_cleanup(indio_dev); | 600 | ad799x_ring_cleanup(indio_dev); |
594 | error_disable_reg: | 601 | error_disable_reg: |
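
The ad799x hunk above spells out the scan_type fields that the retired IIO_ST() macro used to pack, and only frees the interrupt when one was actually requested. A sketch of the explicit scan_type form for a single hypothetical 10-bit channel; the values are illustrative, not the AD799x register layout:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

static const struct iio_chan_spec hypo_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.scan_index = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.scan_type = {
			.sign = 'u',		/* unsigned samples */
			.realbits = 10,		/* valid data bits */
			.storagebits = 16,	/* bits occupied in the buffer */
			.shift = 2,		/* right shift to reach the LSB */
			.endianness = IIO_BE,	/* device sends big endian */
		},
	},
};
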
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index df71669bb60e..7fc66a6a6e36 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c | |||
@@ -1035,8 +1035,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4); | |||
1035 | SHOW_SCALE_AVAILABLE_ATTR(5); | 1035 | SHOW_SCALE_AVAILABLE_ATTR(5); |
1036 | SHOW_SCALE_AVAILABLE_ATTR(6); | 1036 | SHOW_SCALE_AVAILABLE_ATTR(6); |
1037 | SHOW_SCALE_AVAILABLE_ATTR(7); | 1037 | SHOW_SCALE_AVAILABLE_ATTR(7); |
1038 | SHOW_SCALE_AVAILABLE_ATTR(8); | ||
1039 | SHOW_SCALE_AVAILABLE_ATTR(9); | ||
1040 | SHOW_SCALE_AVAILABLE_ATTR(10); | 1038 | SHOW_SCALE_AVAILABLE_ATTR(10); |
1041 | SHOW_SCALE_AVAILABLE_ATTR(11); | 1039 | SHOW_SCALE_AVAILABLE_ATTR(11); |
1042 | SHOW_SCALE_AVAILABLE_ATTR(12); | 1040 | SHOW_SCALE_AVAILABLE_ATTR(12); |
@@ -1053,8 +1051,6 @@ static struct attribute *mxs_lradc_attributes[] = { | |||
1053 | &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, | 1051 | &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, |
1054 | &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, | 1052 | &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, |
1055 | &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, | 1053 | &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, |
1056 | &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr, | ||
1057 | &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr, | ||
1058 | &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, | 1054 | &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, |
1059 | &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, | 1055 | &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, |
1060 | &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, | 1056 | &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, |
@@ -1613,7 +1609,7 @@ static int mxs_lradc_probe(struct platform_device *pdev) | |||
1613 | * of the array. | 1609 | * of the array. |
1614 | */ | 1610 | */ |
1615 | scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> | 1611 | scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> |
1616 | (iio->channels[i].scan_type.realbits - s); | 1612 | (LRADC_RESOLUTION - s); |
1617 | lradc->scale_avail[i][s].nano = | 1613 | lradc->scale_avail[i][s].nano = |
1618 | do_div(scale_uv, 100000000) * 10; | 1614 | do_div(scale_uv, 100000000) * 10; |
1619 | lradc->scale_avail[i][s].integer = scale_uv; | 1615 | lradc->scale_avail[i][s].integer = scale_uv; |
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 0a4298b744e6..2b96665da8a2 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
629 | struct iio_buffer *buffer; | 629 | struct iio_buffer *buffer; |
630 | 630 | ||
631 | buffer = iio_kfifo_allocate(indio_dev); | 631 | buffer = iio_kfifo_allocate(indio_dev); |
632 | if (buffer) | 632 | if (!buffer) |
633 | return -ENOMEM; | 633 | return -ENOMEM; |
634 | 634 | ||
635 | iio_device_attach_buffer(indio_dev, buffer); | 635 | iio_device_attach_buffer(indio_dev, buffer); |
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 09ef5fb8bae6..236ed66f116a 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c | |||
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm) | |||
88 | 88 | ||
89 | imx_drm_device_put(); | 89 | imx_drm_device_put(); |
90 | 90 | ||
91 | drm_vblank_cleanup(imxdrm->drm); | 91 | drm_vblank_cleanup(drm); |
92 | drm_kms_helper_poll_fini(imxdrm->drm); | 92 | drm_kms_helper_poll_fini(drm); |
93 | drm_mode_config_cleanup(imxdrm->drm); | 93 | drm_mode_config_cleanup(drm); |
94 | 94 | ||
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format); | |||
142 | 142 | ||
143 | int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) | 143 | int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) |
144 | { | 144 | { |
145 | return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 145 | return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
146 | } | 146 | } |
147 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); | 147 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); |
148 | 148 | ||
149 | void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) | 149 | void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) |
150 | { | 150 | { |
151 | drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 151 | drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
152 | } | 152 | } |
153 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); | 153 | EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); |
154 | 154 | ||
155 | void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) | 155 | void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) |
156 | { | 156 | { |
157 | drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe); | 157 | drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe); |
158 | } | 158 | } |
159 | EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); | 159 | EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); |
160 | 160 | ||
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister( | |||
370 | } | 370 | } |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * register a crtc to the drm core | ||
374 | */ | ||
375 | static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc) | ||
376 | { | ||
377 | struct imx_drm_device *imxdrm = __imx_drm_device(); | ||
378 | int ret; | ||
379 | |||
380 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); | ||
381 | if (ret) | ||
382 | return ret; | ||
383 | |||
384 | drm_crtc_helper_add(imx_drm_crtc->crtc, | ||
385 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | ||
386 | |||
387 | drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc, | ||
388 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
389 | |||
390 | drm_mode_group_reinit(imxdrm->drm); | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * Called by the CRTC driver when all CRTCs are registered. This | 373 | * Called by the CRTC driver when all CRTCs are registered. This |
397 | * puts all the pieces together and initializes the driver. | 374 | * puts all the pieces together and initializes the driver. |
398 | * Once this is called no more CRTCs can be registered since | 375 | * Once this is called no more CRTCs can be registered since |
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
424 | 401 | ||
425 | mutex_lock(&imxdrm->mutex); | 402 | mutex_lock(&imxdrm->mutex); |
426 | 403 | ||
427 | drm_kms_helper_poll_init(imxdrm->drm); | 404 | drm_kms_helper_poll_init(drm); |
428 | 405 | ||
429 | /* setup the grouping for the legacy output */ | 406 | /* setup the grouping for the legacy output */ |
430 | ret = drm_mode_group_init_legacy_group(imxdrm->drm, | 407 | ret = drm_mode_group_init_legacy_group(drm, |
431 | &imxdrm->drm->primary->mode_group); | 408 | &drm->primary->mode_group); |
432 | if (ret) | 409 | if (ret) |
433 | goto err_kms; | 410 | goto err_kms; |
434 | 411 | ||
435 | ret = drm_vblank_init(imxdrm->drm, MAX_CRTC); | 412 | ret = drm_vblank_init(drm, MAX_CRTC); |
436 | if (ret) | 413 | if (ret) |
437 | goto err_kms; | 414 | goto err_kms; |
438 | 415 | ||
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) | |||
441 | * by drm timer once a current process gives up ownership of | 418 | * by drm timer once a current process gives up ownership of |
442 | * vblank event.(after drm_vblank_put function is called) | 419 | * vblank event.(after drm_vblank_put function is called) |
443 | */ | 420 | */ |
444 | imxdrm->drm->vblank_disable_allowed = true; | 421 | drm->vblank_disable_allowed = true; |
445 | 422 | ||
446 | if (!imx_drm_device_get()) { | 423 | if (!imx_drm_device_get()) { |
447 | ret = -EINVAL; | 424 | ret = -EINVAL; |
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc, | |||
536 | 513 | ||
537 | *new_crtc = imx_drm_crtc; | 514 | *new_crtc = imx_drm_crtc; |
538 | 515 | ||
539 | ret = imx_drm_crtc_register(imx_drm_crtc); | 516 | ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); |
540 | if (ret) | 517 | if (ret) |
541 | goto err_register; | 518 | goto err_register; |
542 | 519 | ||
520 | drm_crtc_helper_add(crtc, | ||
521 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | ||
522 | |||
523 | drm_crtc_init(imxdrm->drm, crtc, | ||
524 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | ||
525 | |||
526 | drm_mode_group_reinit(imxdrm->drm); | ||
527 | |||
543 | imx_drm_update_possible_crtcs(); | 528 | imx_drm_update_possible_crtcs(); |
544 | 529 | ||
545 | mutex_unlock(&imxdrm->mutex); | 530 | mutex_unlock(&imxdrm->mutex); |
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c index f3a1f5e2e492..62ce0e86f14b 100644 --- a/drivers/staging/imx-drm/imx-hdmi.c +++ b/drivers/staging/imx-drm/imx-hdmi.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/hdmi.h> | ||
19 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
20 | #include <linux/mfd/syscon.h> | 21 | #include <linux/mfd/syscon.h> |
21 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 22 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
@@ -52,11 +53,6 @@ enum hdmi_datamap { | |||
52 | YCbCr422_12B = 0x12, | 53 | YCbCr422_12B = 0x12, |
53 | }; | 54 | }; |
54 | 55 | ||
55 | enum hdmi_colorimetry { | ||
56 | ITU601, | ||
57 | ITU709, | ||
58 | }; | ||
59 | |||
60 | enum imx_hdmi_devtype { | 56 | enum imx_hdmi_devtype { |
61 | IMX6Q_HDMI, | 57 | IMX6Q_HDMI, |
62 | IMX6DL_HDMI, | 58 | IMX6DL_HDMI, |
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi) | |||
489 | 485 | ||
490 | if (is_color_space_conversion(hdmi)) { | 486 | if (is_color_space_conversion(hdmi)) { |
491 | if (hdmi->hdmi_data.enc_out_format == RGB) { | 487 | if (hdmi->hdmi_data.enc_out_format == RGB) { |
492 | if (hdmi->hdmi_data.colorimetry == ITU601) | 488 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
493 | csc_coeff = &csc_coeff_rgb_out_eitu601; | 489 | csc_coeff = &csc_coeff_rgb_out_eitu601; |
494 | else | 490 | else |
495 | csc_coeff = &csc_coeff_rgb_out_eitu709; | 491 | csc_coeff = &csc_coeff_rgb_out_eitu709; |
496 | } else if (hdmi->hdmi_data.enc_in_format == RGB) { | 492 | } else if (hdmi->hdmi_data.enc_in_format == RGB) { |
497 | if (hdmi->hdmi_data.colorimetry == ITU601) | 493 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
498 | csc_coeff = &csc_coeff_rgb_in_eitu601; | 494 | csc_coeff = &csc_coeff_rgb_in_eitu601; |
499 | else | 495 | else |
500 | csc_coeff = &csc_coeff_rgb_in_eitu709; | 496 | csc_coeff = &csc_coeff_rgb_in_eitu709; |
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi) | |||
1140 | /* Set up colorimetry */ | 1136 | /* Set up colorimetry */ |
1141 | if (hdmi->hdmi_data.enc_out_format == XVYCC444) { | 1137 | if (hdmi->hdmi_data.enc_out_format == XVYCC444) { |
1142 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; | 1138 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO; |
1143 | if (hdmi->hdmi_data.colorimetry == ITU601) | 1139 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
1144 | ext_colorimetry = | 1140 | ext_colorimetry = |
1145 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; | 1141 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; |
1146 | else /* hdmi->hdmi_data.colorimetry == ITU709 */ | 1142 | else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ |
1147 | ext_colorimetry = | 1143 | ext_colorimetry = |
1148 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; | 1144 | HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709; |
1149 | } else if (hdmi->hdmi_data.enc_out_format != RGB) { | 1145 | } else if (hdmi->hdmi_data.enc_out_format != RGB) { |
1150 | if (hdmi->hdmi_data.colorimetry == ITU601) | 1146 | if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601) |
1151 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; | 1147 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE; |
1152 | else /* hdmi->hdmi_data.colorimetry == ITU709 */ | 1148 | else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/ |
1153 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; | 1149 | colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR; |
1154 | ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; | 1150 | ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601; |
1155 | } else { /* Carries no data */ | 1151 | } else { /* Carries no data */ |
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode) | |||
1379 | (hdmi->vic == 21) || (hdmi->vic == 22) || | 1375 | (hdmi->vic == 21) || (hdmi->vic == 22) || |
1380 | (hdmi->vic == 2) || (hdmi->vic == 3) || | 1376 | (hdmi->vic == 2) || (hdmi->vic == 3) || |
1381 | (hdmi->vic == 17) || (hdmi->vic == 18)) | 1377 | (hdmi->vic == 17) || (hdmi->vic == 18)) |
1382 | hdmi->hdmi_data.colorimetry = ITU601; | 1378 | hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601; |
1383 | else | 1379 | else |
1384 | hdmi->hdmi_data.colorimetry = ITU709; | 1380 | hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709; |
1385 | 1381 | ||
1386 | if ((hdmi->vic == 10) || (hdmi->vic == 11) || | 1382 | if ((hdmi->vic == 10) || (hdmi->vic == 11) || |
1387 | (hdmi->vic == 12) || (hdmi->vic == 13) || | 1383 | (hdmi->vic == 12) || (hdmi->vic == 13) || |
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO index 22742d6d62a8..0a2b6cb3775e 100644 --- a/drivers/staging/lustre/TODO +++ b/drivers/staging/lustre/TODO | |||
@@ -9,5 +9,6 @@ | |||
9 | * Other minor misc cleanups... | 9 | * Other minor misc cleanups... |
10 | 10 | ||
11 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger | 11 | Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger |
12 | <andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing | 12 | <andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and |
13 | hpdd-discuss <hpdd-discuss@lists.01.org> would be great too. | 13 | Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org> |
14 | would be great too. | ||
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h index 596a15fc8996..037ae8a6d531 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h | |||
@@ -61,6 +61,8 @@ struct kuc_hdr { | |||
61 | __u16 kuc_msglen; /* Including header */ | 61 | __u16 kuc_msglen; /* Including header */ |
62 | } __attribute__((aligned(sizeof(__u64)))); | 62 | } __attribute__((aligned(sizeof(__u64)))); |
63 | 63 | ||
64 | #define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE) | ||
65 | |||
64 | #define KUC_MAGIC 0x191C /*Lustre9etLinC */ | 66 | #define KUC_MAGIC 0x191C /*Lustre9etLinC */ |
65 | #define KUC_FL_BLOCK 0x01 /* Wait for send */ | 67 | #define KUC_FL_BLOCK 0x01 /* Wait for send */ |
66 | 68 | ||
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index d0d942ced01a..dddccca120c9 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | |||
@@ -120,7 +120,7 @@ do { \ | |||
120 | do { \ | 120 | do { \ |
121 | LASSERT(!in_interrupt() || \ | 121 | LASSERT(!in_interrupt() || \ |
122 | ((size) <= LIBCFS_VMALLOC_SIZE && \ | 122 | ((size) <= LIBCFS_VMALLOC_SIZE && \ |
123 | ((mask) & GFP_ATOMIC)) != 0); \ | 123 | ((mask) & __GFP_WAIT) == 0)); \ |
124 | } while (0) | 124 | } while (0) |
125 | 125 | ||
126 | #define LIBCFS_ALLOC_POST(ptr, size) \ | 126 | #define LIBCFS_ALLOC_POST(ptr, size) \ |
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 93648632ba26..6f58ead20393 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | |||
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) | |||
529 | { | 529 | { |
530 | struct page *page; | 530 | struct page *page; |
531 | 531 | ||
532 | if (is_vmalloc_addr(vaddr)) { | 532 | if (is_vmalloc_addr((void *)vaddr)) { |
533 | page = vmalloc_to_page ((void *)vaddr); | 533 | page = vmalloc_to_page ((void *)vaddr); |
534 | LASSERT (page != NULL); | 534 | LASSERT (page != NULL); |
535 | return page; | 535 | return page; |
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 68a4f52ec998..b7b53b579c85 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | |||
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) | |||
924 | int | 924 | int |
925 | ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) | 925 | ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) |
926 | { | 926 | { |
927 | int mpflag = 0; | 927 | int mpflag = 1; |
928 | int type = lntmsg->msg_type; | 928 | int type = lntmsg->msg_type; |
929 | lnet_process_id_t target = lntmsg->msg_target; | 929 | lnet_process_id_t target = lntmsg->msg_target; |
930 | unsigned int payload_niov = lntmsg->msg_niov; | 930 | unsigned int payload_niov = lntmsg->msg_niov; |
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) | |||
993 | 993 | ||
994 | /* The first fragment will be set later in pro_pack */ | 994 | /* The first fragment will be set later in pro_pack */ |
995 | rc = ksocknal_launch_packet(ni, tx, target); | 995 | rc = ksocknal_launch_packet(ni, tx, target); |
996 | if (lntmsg->msg_vmflush) | 996 | if (!mpflag) |
997 | cfs_memory_pressure_restore(mpflag); | 997 | cfs_memory_pressure_restore(mpflag); |
998 | |||
998 | if (rc == 0) | 999 | if (rc == 0) |
999 | return (0); | 1000 | return (0); |
1000 | 1001 | ||
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 6b6c0240e824..7893d83e131f 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h | |||
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error) | |||
760 | *flags |= (error << CLF_HSM_ERR_L); | 760 | *flags |= (error << CLF_HSM_ERR_L); |
761 | } | 761 | } |
762 | 762 | ||
763 | #define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec)) | 763 | #define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \ |
764 | sizeof(struct changelog_ext_rec)) | ||
764 | 765 | ||
765 | struct changelog_rec { | 766 | struct changelog_rec { |
766 | __u16 cr_namelen; | 767 | __u16 cr_namelen; |
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 22d0acc95bc5..52b7731bcc38 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c | |||
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) | |||
1086 | break; | 1086 | break; |
1087 | case Q_GETQUOTA: | 1087 | case Q_GETQUOTA: |
1088 | if (((type == USRQUOTA && | 1088 | if (((type == USRQUOTA && |
1089 | uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || | 1089 | !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || |
1090 | (type == GRPQUOTA && | 1090 | (type == GRPQUOTA && |
1091 | !in_egroup_p(make_kgid(&init_user_ns, id)))) && | 1091 | !in_egroup_p(make_kgid(&init_user_ns, id)))) && |
1092 | (!cfs_capable(CFS_CAP_SYS_ADMIN) || | 1092 | (!cfs_capable(CFS_CAP_SYS_ADMIN) || |
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index d1ad91c34ddc..83013927e131 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c | |||
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags) | |||
1430 | { | 1430 | { |
1431 | struct kuc_hdr *lh = (struct kuc_hdr *)buf; | 1431 | struct kuc_hdr *lh = (struct kuc_hdr *)buf; |
1432 | 1432 | ||
1433 | LASSERT(len <= CR_MAXSIZE); | 1433 | LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE); |
1434 | 1434 | ||
1435 | lh->kuc_magic = KUC_MAGIC; | 1435 | lh->kuc_magic = KUC_MAGIC; |
1436 | lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; | 1436 | lh->kuc_transport = KUC_TRANSPORT_CHANGELOG; |
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata) | |||
1503 | CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", | 1503 | CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n", |
1504 | cs->cs_fp, cs->cs_startrec); | 1504 | cs->cs_fp, cs->cs_startrec); |
1505 | 1505 | ||
1506 | OBD_ALLOC(cs->cs_buf, CR_MAXSIZE); | 1506 | OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); |
1507 | if (cs->cs_buf == NULL) | 1507 | if (cs->cs_buf == NULL) |
1508 | GOTO(out, rc = -ENOMEM); | 1508 | GOTO(out, rc = -ENOMEM); |
1509 | 1509 | ||
@@ -1540,7 +1540,7 @@ out: | |||
1540 | if (ctxt) | 1540 | if (ctxt) |
1541 | llog_ctxt_put(ctxt); | 1541 | llog_ctxt_put(ctxt); |
1542 | if (cs->cs_buf) | 1542 | if (cs->cs_buf) |
1543 | OBD_FREE(cs->cs_buf, CR_MAXSIZE); | 1543 | OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE); |
1544 | OBD_FREE_PTR(cs); | 1544 | OBD_FREE_PTR(cs); |
1545 | return rc; | 1545 | return rc; |
1546 | } | 1546 | } |
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index eedffed17e39..d8ea25486a33 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c | |||
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv, | |||
892 | priv->mii_bus->write = xlr_mii_write; | 892 | priv->mii_bus->write = xlr_mii_write; |
893 | priv->mii_bus->parent = &pdev->dev; | 893 | priv->mii_bus->parent = &pdev->dev; |
894 | priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | 894 | priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
895 | if (priv->mii_bus->irq == NULL) { | ||
896 | pr_err("irq alloc failed\n"); | ||
897 | mdiobus_free(priv->mii_bus); | ||
898 | return -ENOMEM; | ||
899 | } | ||
895 | priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; | 900 | priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq; |
896 | 901 | ||
897 | /* Scan only the enabled address */ | 902 | /* Scan only the enabled address */ |
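
The xlr_net hunk above checks the irq-table allocation and releases the already-allocated MDIO bus before bailing out. The same unwind order in isolation, with the surrounding driver state omitted:

#include <linux/phy.h>
#include <linux/slab.h>

static struct mii_bus *hypo_mdio_alloc(void)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return NULL;

	bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!bus->irq) {
		mdiobus_free(bus);	/* undo mdiobus_alloc() on failure */
		return NULL;
	}

	return bus;
}
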
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c index 47e0a91238a1..5a001d9b4252 100644 --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ b/drivers/staging/octeon-usb/octeon-hcd.c | |||
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags { | |||
275 | */ | 275 | */ |
276 | #define MAX_TRANSFER_PACKETS ((1<<10)-1) | 276 | #define MAX_TRANSFER_PACKETS ((1<<10)-1) |
277 | 277 | ||
278 | enum { | ||
279 | USB_CLOCK_TYPE_REF_12, | ||
280 | USB_CLOCK_TYPE_REF_24, | ||
281 | USB_CLOCK_TYPE_REF_48, | ||
282 | USB_CLOCK_TYPE_CRYSTAL_12, | ||
283 | }; | ||
284 | |||
285 | /** | 278 | /** |
286 | * Logical transactions may take numerous low level | 279 | * Logical transactions may take numerous low level |
287 | * transactions, especially when splits are concerned. This | 280 | * transactions, especially when splits are concerned. This |
@@ -471,19 +464,6 @@ struct octeon_hcd { | |||
471 | /* Returns the IO address to push/pop stuff data from the FIFOs */ | 464 | /* Returns the IO address to push/pop stuff data from the FIFOs */ |
472 | #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) | 465 | #define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) |
473 | 466 | ||
474 | static int octeon_usb_get_clock_type(void) | ||
475 | { | ||
476 | switch (cvmx_sysinfo_get()->board_type) { | ||
477 | case CVMX_BOARD_TYPE_BBGW_REF: | ||
478 | case CVMX_BOARD_TYPE_LANAI2_A: | ||
479 | case CVMX_BOARD_TYPE_LANAI2_U: | ||
480 | case CVMX_BOARD_TYPE_LANAI2_G: | ||
481 | case CVMX_BOARD_TYPE_UBNT_E100: | ||
482 | return USB_CLOCK_TYPE_CRYSTAL_12; | ||
483 | } | ||
484 | return USB_CLOCK_TYPE_REF_48; | ||
485 | } | ||
486 | |||
487 | /** | 467 | /** |
488 | * Read a USB 32bit CSR. It performs the necessary address swizzle | 468 | * Read a USB 32bit CSR. It performs the necessary address swizzle |
489 | * for 32bit CSRs and logs the value in a readable format if | 469 | * for 32bit CSRs and logs the value in a readable format if |
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) | |||
582 | return 0; /* Data0 */ | 562 | return 0; /* Data0 */ |
583 | } | 563 | } |
584 | 564 | ||
585 | |||
586 | /** | ||
587 | * Return the number of USB ports supported by this Octeon | ||
588 | * chip. If the chip doesn't support USB, or is not supported | ||
589 | * by this API, a zero will be returned. Most Octeon chips | ||
590 | * support one usb port, but some support two ports. | ||
591 | * cvmx_usb_initialize() must be called on independent | ||
592 | * struct cvmx_usb_state. | ||
593 | * | ||
594 | * Returns: Number of port, zero if usb isn't supported | ||
595 | */ | ||
596 | static int cvmx_usb_get_num_ports(void) | ||
597 | { | ||
598 | int arch_ports = 0; | ||
599 | |||
600 | if (OCTEON_IS_MODEL(OCTEON_CN56XX)) | ||
601 | arch_ports = 1; | ||
602 | else if (OCTEON_IS_MODEL(OCTEON_CN52XX)) | ||
603 | arch_ports = 2; | ||
604 | else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) | ||
605 | arch_ports = 1; | ||
606 | else if (OCTEON_IS_MODEL(OCTEON_CN31XX)) | ||
607 | arch_ports = 1; | ||
608 | else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) | ||
609 | arch_ports = 1; | ||
610 | else | ||
611 | arch_ports = 0; | ||
612 | |||
613 | return arch_ports; | ||
614 | } | ||
615 | |||
616 | /** | 565 | /** |
617 | * Initialize a USB port for use. This must be called before any | 566 | * Initialize a USB port for use. This must be called before any |
618 | * other access to the Octeon USB port is made. The port starts | 567 | * other access to the Octeon USB port is made. The port starts |
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void) | |||
628 | * Returns: 0 or a negative error code. | 577 | * Returns: 0 or a negative error code. |
629 | */ | 578 | */ |
630 | static int cvmx_usb_initialize(struct cvmx_usb_state *usb, | 579 | static int cvmx_usb_initialize(struct cvmx_usb_state *usb, |
631 | int usb_port_number) | 580 | int usb_port_number, |
581 | enum cvmx_usb_initialize_flags flags) | ||
632 | { | 582 | { |
633 | union cvmx_usbnx_clk_ctl usbn_clk_ctl; | 583 | union cvmx_usbnx_clk_ctl usbn_clk_ctl; |
634 | union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; | 584 | union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; |
635 | enum cvmx_usb_initialize_flags flags = 0; | ||
636 | int i; | 585 | int i; |
637 | 586 | ||
638 | /* At first allow 0-1 for the usb port number */ | 587 | /* At first allow 0-1 for the usb port number */ |
639 | if ((usb_port_number < 0) || (usb_port_number > 1)) | 588 | if ((usb_port_number < 0) || (usb_port_number > 1)) |
640 | return -EINVAL; | 589 | return -EINVAL; |
641 | /* For all chips except 52XX there is only one port */ | ||
642 | if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0)) | ||
643 | return -EINVAL; | ||
644 | /* Try to determine clock type automatically */ | ||
645 | if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) { | ||
646 | /* Only 12 MHZ crystals are supported */ | ||
647 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; | ||
648 | } else { | ||
649 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; | ||
650 | |||
651 | switch (octeon_usb_get_clock_type()) { | ||
652 | case USB_CLOCK_TYPE_REF_12: | ||
653 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; | ||
654 | break; | ||
655 | case USB_CLOCK_TYPE_REF_24: | ||
656 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; | ||
657 | break; | ||
658 | case USB_CLOCK_TYPE_REF_48: | ||
659 | flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; | ||
660 | break; | ||
661 | default: | ||
662 | return -EINVAL; | ||
663 | break; | ||
664 | } | ||
665 | } | ||
666 | 590 | ||
667 | memset(usb, 0, sizeof(*usb)); | 591 | memset(usb, 0, sizeof(*usb)); |
668 | usb->init_flags = flags; | 592 | usb->init_flags = flags; |
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
3431 | return 0; | 3355 | return 0; |
3432 | } | 3356 | } |
3433 | 3357 | ||
3434 | |||
3435 | static const struct hc_driver octeon_hc_driver = { | 3358 | static const struct hc_driver octeon_hc_driver = { |
3436 | .description = "Octeon USB", | 3359 | .description = "Octeon USB", |
3437 | .product_desc = "Octeon Host Controller", | 3360 | .product_desc = "Octeon Host Controller", |
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = { | |||
3448 | .hub_control = octeon_usb_hub_control, | 3371 | .hub_control = octeon_usb_hub_control, |
3449 | }; | 3372 | }; |
3450 | 3373 | ||
3451 | 3374 | static int octeon_usb_probe(struct platform_device *pdev) | |
3452 | static int octeon_usb_driver_probe(struct device *dev) | ||
3453 | { | 3375 | { |
3454 | int status; | 3376 | int status; |
3455 | int usb_num = to_platform_device(dev)->id; | 3377 | int initialize_flags; |
3456 | int irq = platform_get_irq(to_platform_device(dev), 0); | 3378 | int usb_num; |
3379 | struct resource *res_mem; | ||
3380 | struct device_node *usbn_node; | ||
3381 | int irq = platform_get_irq(pdev, 0); | ||
3382 | struct device *dev = &pdev->dev; | ||
3457 | struct octeon_hcd *priv; | 3383 | struct octeon_hcd *priv; |
3458 | struct usb_hcd *hcd; | 3384 | struct usb_hcd *hcd; |
3459 | unsigned long flags; | 3385 | unsigned long flags; |
3386 | u32 clock_rate = 48000000; | ||
3387 | bool is_crystal_clock = false; | ||
3388 | const char *clock_type; | ||
3389 | int i; | ||
3390 | |||
3391 | if (dev->of_node == NULL) { | ||
3392 | dev_err(dev, "Error: empty of_node\n"); | ||
3393 | return -ENXIO; | ||
3394 | } | ||
3395 | usbn_node = dev->of_node->parent; | ||
3396 | |||
3397 | i = of_property_read_u32(usbn_node, | ||
3398 | "refclk-frequency", &clock_rate); | ||
3399 | if (i) { | ||
3400 | dev_err(dev, "No USBN \"refclk-frequency\"\n"); | ||
3401 | return -ENXIO; | ||
3402 | } | ||
3403 | switch (clock_rate) { | ||
3404 | case 12000000: | ||
3405 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ; | ||
3406 | break; | ||
3407 | case 24000000: | ||
3408 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ; | ||
3409 | break; | ||
3410 | case 48000000: | ||
3411 | initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; | ||
3412 | break; | ||
3413 | default: | ||
3414 | dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate); | ||
3415 | return -ENXIO; | ||
3416 | |||
3417 | } | ||
3418 | |||
3419 | i = of_property_read_string(usbn_node, | ||
3420 | "refclk-type", &clock_type); | ||
3421 | |||
3422 | if (!i && strcmp("crystal", clock_type) == 0) | ||
3423 | is_crystal_clock = true; | ||
3424 | |||
3425 | if (is_crystal_clock) | ||
3426 | initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; | ||
3427 | else | ||
3428 | initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; | ||
3429 | |||
3430 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
3431 | if (res_mem == NULL) { | ||
3432 | dev_err(dev, "found no memory resource\n"); | ||
3433 | return -ENXIO; | ||
3434 | } | ||
3435 | usb_num = (res_mem->start >> 44) & 1; | ||
3436 | |||
3437 | if (irq < 0) { | ||
3438 | /* Defective device tree, but we know how to fix it. */ | ||
3439 | irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; | ||
3440 | irq = irq_create_mapping(NULL, hwirq); | ||
3441 | } | ||
3460 | 3442 | ||
3461 | /* | 3443 | /* |
3462 | * Set the DMA mask to 64bits so we get buffers already translated for | 3444 | * Set the DMA mask to 64bits so we get buffers already translated for |
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3465 | dev->coherent_dma_mask = ~0; | 3447 | dev->coherent_dma_mask = ~0; |
3466 | dev->dma_mask = &dev->coherent_dma_mask; | 3448 | dev->dma_mask = &dev->coherent_dma_mask; |
3467 | 3449 | ||
3450 | /* | ||
3451 | * Only cn52XX and cn56XX have DWC_OTG USB hardware and the | ||
3452 | * IOB priority registers. Under heavy network load USB | ||
3453 | * hardware can be starved by the IOB causing a crash. Give | ||
3454 | * it a priority boost if it has been waiting more than 400 | ||
3455 | * cycles to avoid this situation. | ||
3456 | * | ||
3457 | * Testing indicates that a cnt_val of 8192 is not sufficient, | ||
3458 | * but no failures are seen with 4096. We choose a value of | ||
3459 | * 400 to give a safety factor of 10. | ||
3460 | */ | ||
3461 | if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | ||
3462 | union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; | ||
3463 | |||
3464 | pri_cnt.u64 = 0; | ||
3465 | pri_cnt.s.cnt_enb = 1; | ||
3466 | pri_cnt.s.cnt_val = 400; | ||
3467 | cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); | ||
3468 | } | ||
3469 | |||
3468 | hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); | 3470 | hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); |
3469 | if (!hcd) { | 3471 | if (!hcd) { |
3470 | dev_dbg(dev, "Failed to allocate memory for HCD\n"); | 3472 | dev_dbg(dev, "Failed to allocate memory for HCD\n"); |
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3478 | tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); | 3480 | tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv); |
3479 | INIT_LIST_HEAD(&priv->dequeue_list); | 3481 | INIT_LIST_HEAD(&priv->dequeue_list); |
3480 | 3482 | ||
3481 | status = cvmx_usb_initialize(&priv->usb, usb_num); | 3483 | status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags); |
3482 | if (status) { | 3484 | if (status) { |
3483 | dev_dbg(dev, "USB initialization failed with %d\n", status); | 3485 | dev_dbg(dev, "USB initialization failed with %d\n", status); |
3484 | kfree(hcd); | 3486 | kfree(hcd); |
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3492 | cvmx_usb_poll(&priv->usb); | 3494 | cvmx_usb_poll(&priv->usb); |
3493 | spin_unlock_irqrestore(&priv->lock, flags); | 3495 | spin_unlock_irqrestore(&priv->lock, flags); |
3494 | 3496 | ||
3495 | status = usb_add_hcd(hcd, irq, IRQF_SHARED); | 3497 | status = usb_add_hcd(hcd, irq, 0); |
3496 | if (status) { | 3498 | if (status) { |
3497 | dev_dbg(dev, "USB add HCD failed with %d\n", status); | 3499 | dev_dbg(dev, "USB add HCD failed with %d\n", status); |
3498 | kfree(hcd); | 3500 | kfree(hcd); |
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev) | |||
3500 | } | 3502 | } |
3501 | device_wakeup_enable(hcd->self.controller); | 3503 | device_wakeup_enable(hcd->self.controller); |
3502 | 3504 | ||
3503 | dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); | 3505 | dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq); |
3504 | 3506 | ||
3505 | return 0; | 3507 | return 0; |
3506 | } | 3508 | } |
3507 | 3509 | ||
3508 | static int octeon_usb_driver_remove(struct device *dev) | 3510 | static int octeon_usb_remove(struct platform_device *pdev) |
3509 | { | 3511 | { |
3510 | int status; | 3512 | int status; |
3513 | struct device *dev = &pdev->dev; | ||
3511 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 3514 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
3512 | struct octeon_hcd *priv = hcd_to_octeon(hcd); | 3515 | struct octeon_hcd *priv = hcd_to_octeon(hcd); |
3513 | unsigned long flags; | 3516 | unsigned long flags; |
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev) | |||
3525 | return 0; | 3528 | return 0; |
3526 | } | 3529 | } |
3527 | 3530 | ||
3528 | static struct device_driver octeon_usb_driver = { | 3531 | static struct of_device_id octeon_usb_match[] = { |
3529 | .name = "OcteonUSB", | 3532 | { |
3530 | .bus = &platform_bus_type, | 3533 | .compatible = "cavium,octeon-5750-usbc", |
3531 | .probe = octeon_usb_driver_probe, | 3534 | }, |
3532 | .remove = octeon_usb_driver_remove, | 3535 | {}, |
3533 | }; | 3536 | }; |
3534 | 3537 | ||
3538 | static struct platform_driver octeon_usb_driver = { | ||
3539 | .driver = { | ||
3540 | .name = "OcteonUSB", | ||
3541 | .owner = THIS_MODULE, | ||
3542 | .of_match_table = octeon_usb_match, | ||
3543 | }, | ||
3544 | .probe = octeon_usb_probe, | ||
3545 | .remove = octeon_usb_remove, | ||
3546 | }; | ||
3535 | 3547 | ||
3536 | #define MAX_USB_PORTS 10 | 3548 | static int __init octeon_usb_driver_init(void) |
3537 | static struct platform_device *pdev_glob[MAX_USB_PORTS]; | ||
3538 | static int octeon_usb_registered; | ||
3539 | static int __init octeon_usb_module_init(void) | ||
3540 | { | 3549 | { |
3541 | int num_devices = cvmx_usb_get_num_ports(); | 3550 | if (usb_disabled()) |
3542 | int device; | 3551 | return 0; |
3543 | |||
3544 | if (usb_disabled() || num_devices == 0) | ||
3545 | return -ENODEV; | ||
3546 | |||
3547 | if (driver_register(&octeon_usb_driver)) | ||
3548 | return -ENOMEM; | ||
3549 | |||
3550 | octeon_usb_registered = 1; | ||
3551 | |||
3552 | /* | ||
3553 | * Only cn52XX and cn56XX have DWC_OTG USB hardware and the | ||
3554 | * IOB priority registers. Under heavy network load USB | ||
3555 | * hardware can be starved by the IOB causing a crash. Give | ||
3556 | * it a priority boost if it has been waiting more than 400 | ||
3557 | * cycles to avoid this situation. | ||
3558 | * | ||
3559 | * Testing indicates that a cnt_val of 8192 is not sufficient, | ||
3560 | * but no failures are seen with 4096. We choose a value of | ||
3561 | * 400 to give a safety factor of 10. | ||
3562 | */ | ||
3563 | if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) { | ||
3564 | union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; | ||
3565 | |||
3566 | pri_cnt.u64 = 0; | ||
3567 | pri_cnt.s.cnt_enb = 1; | ||
3568 | pri_cnt.s.cnt_val = 400; | ||
3569 | cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64); | ||
3570 | } | ||
3571 | |||
3572 | for (device = 0; device < num_devices; device++) { | ||
3573 | struct resource irq_resource; | ||
3574 | struct platform_device *pdev; | ||
3575 | memset(&irq_resource, 0, sizeof(irq_resource)); | ||
3576 | irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1; | ||
3577 | irq_resource.end = irq_resource.start; | ||
3578 | irq_resource.flags = IORESOURCE_IRQ; | ||
3579 | pdev = platform_device_register_simple((char *)octeon_usb_driver.name, device, &irq_resource, 1); | ||
3580 | if (IS_ERR(pdev)) { | ||
3581 | driver_unregister(&octeon_usb_driver); | ||
3582 | octeon_usb_registered = 0; | ||
3583 | return PTR_ERR(pdev); | ||
3584 | } | ||
3585 | if (device < MAX_USB_PORTS) | ||
3586 | pdev_glob[device] = pdev; | ||
3587 | 3552 | ||
3588 | } | 3553 | return platform_driver_register(&octeon_usb_driver); |
3589 | return 0; | ||
3590 | } | 3554 | } |
3555 | module_init(octeon_usb_driver_init); | ||
3591 | 3556 | ||
3592 | static void __exit octeon_usb_module_cleanup(void) | 3557 | static void __exit octeon_usb_driver_exit(void) |
3593 | { | 3558 | { |
3594 | int i; | 3559 | if (usb_disabled()) |
3560 | return; | ||
3595 | 3561 | ||
3596 | for (i = 0; i < MAX_USB_PORTS; i++) | 3562 | platform_driver_unregister(&octeon_usb_driver); |
3597 | if (pdev_glob[i]) { | ||
3598 | platform_device_unregister(pdev_glob[i]); | ||
3599 | pdev_glob[i] = NULL; | ||
3600 | } | ||
3601 | if (octeon_usb_registered) | ||
3602 | driver_unregister(&octeon_usb_driver); | ||
3603 | } | 3563 | } |
3564 | module_exit(octeon_usb_driver_exit); | ||
3604 | 3565 | ||
3605 | MODULE_LICENSE("GPL"); | 3566 | MODULE_LICENSE("GPL"); |
3606 | MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); | 3567 | MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>"); |
3607 | MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver."); | 3568 | MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver."); |
3608 | module_init(octeon_usb_module_init); | ||
3609 | module_exit(octeon_usb_module_cleanup); | ||
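The hunks above drop the hand-rolled platform_device registration in favor of a device-tree matched platform_driver, so ports come from "cavium,octeon-5750-usbc" nodes rather than being created in module init. A minimal sketch of the same pattern, using a hypothetical "vendor,example-usbc" compatible string and collapsing the init/exit boilerplate with module_platform_driver(), could look like:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical compatible string, for illustration only. */
static const struct of_device_id example_usb_match[] = {
	{ .compatible = "vendor,example-usbc", },
	{ },
};
MODULE_DEVICE_TABLE(of, example_usb_match);

static int example_usb_probe(struct platform_device *pdev)
{
	/* Registers and IRQs now come from the matched DT node. */
	return 0;
}

static int example_usb_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_usb_driver = {
	.driver = {
		.name = "example-usb",
		.of_match_table = example_usb_match,
	},
	.probe = example_usb_probe,
	.remove = example_usb_remove,
};
module_platform_driver(example_usb_driver);

MODULE_LICENSE("GPL");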
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c index cb060364dfe7..5d965cf06d59 100644 --- a/drivers/staging/ozwpan/ozproto.c +++ b/drivers/staging/ozwpan/ozproto.c | |||
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev) | |||
668 | if (binding) { | 668 | if (binding) { |
669 | binding->ptype.type = __constant_htons(OZ_ETHERTYPE); | 669 | binding->ptype.type = __constant_htons(OZ_ETHERTYPE); |
670 | binding->ptype.func = oz_pkt_recv; | 670 | binding->ptype.func = oz_pkt_recv; |
671 | memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); | ||
672 | if (net_dev && *net_dev) { | 671 | if (net_dev && *net_dev) { |
672 | memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN); | ||
673 | oz_dbg(ON, "Adding binding: %s\n", net_dev); | 673 | oz_dbg(ON, "Adding binding: %s\n", net_dev); |
674 | binding->ptype.dev = | 674 | binding->ptype.dev = |
675 | dev_get_by_name(&init_net, net_dev); | 675 | dev_get_by_name(&init_net, net_dev); |
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev) | |||
680 | } | 680 | } |
681 | } else { | 681 | } else { |
682 | oz_dbg(ON, "Binding to all netcards\n"); | 682 | oz_dbg(ON, "Binding to all netcards\n"); |
683 | memset(binding->name, 0, OZ_MAX_BINDING_LEN); | ||
683 | binding->ptype.dev = NULL; | 684 | binding->ptype.dev = NULL; |
684 | } | 685 | } |
685 | if (binding) { | 686 | if (binding) { |
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c index 153ec61493ab..96df62f95b6b 100644 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c | |||
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) | |||
912 | unsigned char *pbuf; | 912 | unsigned char *pbuf; |
913 | u32 wpa_ielen = 0; | 913 | u32 wpa_ielen = 0; |
914 | u8 *pbssid = GetAddr3Ptr(pframe); | 914 | u8 *pbssid = GetAddr3Ptr(pframe); |
915 | u32 hidden_ssid = 0; | ||
916 | struct HT_info_element *pht_info = NULL; | 915 | struct HT_info_element *pht_info = NULL; |
917 | struct rtw_ieee80211_ht_cap *pht_cap = NULL; | 916 | struct rtw_ieee80211_ht_cap *pht_cap = NULL; |
918 | u32 bcn_channel; | 917 | u32 bcn_channel; |
919 | unsigned short ht_cap_info; | 918 | unsigned short ht_cap_info; |
920 | unsigned char ht_info_infos_0; | 919 | unsigned char ht_info_infos_0; |
920 | int ssid_len; | ||
921 | 921 | ||
922 | if (is_client_associated_to_ap(Adapter) == false) | 922 | if (is_client_associated_to_ap(Adapter) == false) |
923 | return true; | 923 | return true; |
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) | |||
999 | } | 999 | } |
1000 | 1000 | ||
1001 | /* checking SSID */ | 1001 | /* checking SSID */ |
1002 | ssid_len = 0; | ||
1002 | p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_); | 1003 | p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_); |
1003 | if (p == NULL) { | 1004 | if (p) { |
1004 | DBG_88E("%s marc: cannot find SSID for survey event\n", __func__); | 1005 | ssid_len = *(p + 1); |
1005 | hidden_ssid = true; | 1006 | if (ssid_len > NDIS_802_11_LENGTH_SSID) |
1006 | } else { | 1007 | ssid_len = 0; |
1007 | hidden_ssid = false; | ||
1008 | } | ||
1009 | |||
1010 | if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) { | ||
1011 | memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1)); | ||
1012 | bssid->Ssid.SsidLength = *(p + 1); | ||
1013 | } else { | ||
1014 | bssid->Ssid.SsidLength = 0; | ||
1015 | bssid->Ssid.Ssid[0] = '\0'; | ||
1016 | } | 1008 | } |
1009 | memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len); | ||
1010 | bssid->Ssid.SsidLength = ssid_len; | ||
1017 | 1011 | ||
1018 | RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d " | 1012 | RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d " |
1019 | "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid, | 1013 | "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid, |
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index dec992569476..4ad80ae1067f 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | |||
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info | |||
2500 | ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n", | 2500 | ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n", |
2501 | poidparam->subcode, poidparam->len, len)); | 2501 | poidparam->subcode, poidparam->len, len)); |
2502 | 2502 | ||
2503 | if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) { | 2503 | if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) { |
2504 | RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n")); | 2504 | RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n")); |
2505 | ret = -EINVAL; | 2505 | ret = -EINVAL; |
2506 | goto _rtw_mp_ioctl_hdl_exit; | 2506 | goto _rtw_mp_ioctl_hdl_exit; |
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev, | |||
3164 | u8 *p2pie; | 3164 | u8 *p2pie; |
3165 | uint p2pielen = 0, attr_contentlen = 0; | 3165 | uint p2pielen = 0, attr_contentlen = 0; |
3166 | u8 attr_content[100] = {0x00}; | 3166 | u8 attr_content[100] = {0x00}; |
3167 | 3167 | u8 go_devadd_str[17 + 12] = {}; | |
3168 | u8 go_devadd_str[17 + 10] = {0x00}; | ||
3169 | /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */ | ||
3170 | 3168 | ||
3171 | /* Commented by Albert 20121209 */ | 3169 | /* Commented by Albert 20121209 */ |
3172 | /* The input data is the GO's interface address which the application wants to know its device address. */ | 3170 | /* The input data is the GO's interface address which the application wants to know its device address. */ |
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev, | |||
3223 | spin_unlock_bh(&pmlmepriv->scanned_queue.lock); | 3221 | spin_unlock_bh(&pmlmepriv->scanned_queue.lock); |
3224 | 3222 | ||
3225 | if (!blnMatch) | 3223 | if (!blnMatch) |
3226 | sprintf(go_devadd_str, "\n\ndev_add = NULL"); | 3224 | snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL"); |
3227 | else | 3225 | else |
3228 | sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", | 3226 | snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", |
3229 | attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); | 3227 | attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); |
3230 | 3228 | ||
3231 | if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17)) | 3229 | if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str))) |
3232 | return -EFAULT; | 3230 | return -EFAULT; |
3233 | return ret; | 3231 | return ret; |
3234 | } | 3232 | } |
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 0a341d6ec51f..a70dcef1419e 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | |||
@@ -53,7 +53,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { | |||
53 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ | 53 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ |
54 | /*=== Customer ID ===*/ | 54 | /*=== Customer ID ===*/ |
55 | /****** 8188EUS ********/ | 55 | /****** 8188EUS ********/ |
56 | {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ | 56 | {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ |
57 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ | 57 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ |
58 | {} /* Terminating entry */ | 58 | {} /* Terminating entry */ |
59 | }; | 59 | }; |
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig index 2aa5dac2f1df..abccc9dabd65 100644 --- a/drivers/staging/rtl8821ae/Kconfig +++ b/drivers/staging/rtl8821ae/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config R8821AE | 1 | config R8821AE |
2 | tristate "RealTek RTL8821AE Wireless LAN NIC driver" | 2 | tristate "RealTek RTL8821AE Wireless LAN NIC driver" |
3 | depends on PCI && WLAN | 3 | depends on PCI && WLAN && MAC80211 |
4 | depends on m | 4 | depends on m |
5 | select WIRELESS_EXT | 5 | select WIRELESS_EXT |
6 | select WEXT_PRIV | 6 | select WEXT_PRIV |
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h index cfe88a1efd55..76bef93ad70a 100644 --- a/drivers/staging/rtl8821ae/wifi.h +++ b/drivers/staging/rtl8821ae/wifi.h | |||
@@ -1414,7 +1414,7 @@ struct rtl_dm { | |||
1414 | 1414 | ||
1415 | 1415 | ||
1416 | /*88e tx power tracking*/ | 1416 | /*88e tx power tracking*/ |
1417 | u8 bb_swing_idx_ofdm[2]; | 1417 | u8 bb_swing_idx_ofdm[MAX_RF_PATH]; |
1418 | u8 bb_swing_idx_ofdm_current; | 1418 | u8 bb_swing_idx_ofdm_current; |
1419 | u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; | 1419 | u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; |
1420 | bool bb_swing_flag_Ofdm; | 1420 | bool bb_swing_flag_Ofdm; |
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c index 3c8d28b771e0..81ff8522405c 100644 --- a/drivers/staging/usbip/userspace/libsrc/names.c +++ b/drivers/staging/usbip/userspace/libsrc/names.c | |||
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size) | |||
169 | struct pool *p; | 169 | struct pool *p; |
170 | 170 | ||
171 | p = calloc(1, sizeof(struct pool)); | 171 | p = calloc(1, sizeof(struct pool)); |
172 | if (!p) { | 172 | if (!p) |
173 | free(p); | ||
174 | return NULL; | 173 | return NULL; |
175 | } | ||
176 | 174 | ||
177 | p->mem = calloc(1, size); | 175 | p->mem = calloc(1, size); |
178 | if (!p->mem) | 176 | if (!p->mem) { |
177 | free(p); | ||
179 | return NULL; | 178 | return NULL; |
179 | } | ||
180 | 180 | ||
181 | p->next = pool_head; | 181 | p->next = pool_head; |
182 | pool_head = p; | 182 | pool_head = p; |
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c index 9b51586d11d9..0141bc34d5cc 100644 --- a/drivers/staging/usbip/vhci_sysfs.c +++ b/drivers/staging/usbip/vhci_sysfs.c | |||
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed) | |||
149 | case USB_SPEED_WIRELESS: | 149 | case USB_SPEED_WIRELESS: |
150 | break; | 150 | break; |
151 | default: | 151 | default: |
152 | pr_err("speed %d\n", speed); | 152 | pr_err("Failed attach request for unsupported USB speed: %s\n", |
153 | usb_speed_string(speed)); | ||
153 | return -EINVAL; | 154 | return -EINVAL; |
154 | } | 155 | } |
155 | 156 | ||
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c index 4a1ddaf5e00f..187fc060de26 100644 --- a/drivers/staging/wlags49_h2/wl_wext.c +++ b/drivers/staging/wlags49_h2/wl_wext.c | |||
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in | |||
1061 | goto out; | 1061 | goto out; |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) { | 1064 | if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) { |
1065 | ret = -EINVAL; | 1065 | ret = -EINVAL; |
1066 | goto out; | 1066 | goto out; |
1067 | } | 1067 | } |
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index e048d6439f4a..cda4d80cfaef 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c | |||
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack( | |||
507 | u32 last_statsn; | 507 | u32 last_statsn; |
508 | int found_cmd; | 508 | int found_cmd; |
509 | 509 | ||
510 | if (conn->exp_statsn > begrun) { | 510 | if (!begrun) { |
511 | begrun = conn->exp_statsn; | ||
512 | } else if (conn->exp_statsn > begrun) { | ||
511 | pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" | 513 | pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" |
512 | " 0x%08x but already got ExpStatSN: 0x%08x on CID:" | 514 | " 0x%08x but already got ExpStatSN: 0x%08x on CID:" |
513 | " %hu.\n", begrun, runlength, conn->exp_statsn, | 515 | " %hu.\n", begrun, runlength, conn->exp_statsn, |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 12da9b386169..c3d9df6aaf5f 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent( | |||
500 | 500 | ||
501 | if (segment_mult) { | 501 | if (segment_mult) { |
502 | u64 tmp = lba; | 502 | u64 tmp = lba; |
503 | start_lba = sector_div(tmp, segment_size * segment_mult); | 503 | start_lba = do_div(tmp, segment_size * segment_mult); |
504 | 504 | ||
505 | last_lba = first_lba + segment_size - 1; | 505 | last_lba = first_lba + segment_size - 1; |
506 | if (start_lba >= first_lba && | 506 | if (start_lba >= first_lba && |
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 2f5d77932c80..3013287a2aaa 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c | |||
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; | 2009 | struct t10_reservation *pr_tmpl = &dev->t10_pr; |
2010 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; | 2010 | unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; |
2011 | sense_reason_t ret = TCM_NO_SENSE; | 2011 | sense_reason_t ret = TCM_NO_SENSE; |
2012 | int pr_holder = 0; | 2012 | int pr_holder = 0, type; |
2013 | 2013 | ||
2014 | if (!se_sess || !se_lun) { | 2014 | if (!se_sess || !se_lun) { |
2015 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); | 2015 | pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); |
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2131 | ret = TCM_RESERVATION_CONFLICT; | 2131 | ret = TCM_RESERVATION_CONFLICT; |
2132 | goto out; | 2132 | goto out; |
2133 | } | 2133 | } |
2134 | type = pr_reg->pr_res_type; | ||
2134 | 2135 | ||
2135 | spin_lock(&pr_tmpl->registration_lock); | 2136 | spin_lock(&pr_tmpl->registration_lock); |
2136 | /* | 2137 | /* |
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2161 | * Release the calling I_T Nexus registration now.. | 2162 | * Release the calling I_T Nexus registration now.. |
2162 | */ | 2163 | */ |
2163 | __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); | 2164 | __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); |
2165 | pr_reg = NULL; | ||
2164 | 2166 | ||
2165 | /* | 2167 | /* |
2166 | * From spc4r17, section 5.7.11.3 Unregistering | 2168 | * From spc4r17, section 5.7.11.3 Unregistering |
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2174 | * RESERVATIONS RELEASED. | 2176 | * RESERVATIONS RELEASED. |
2175 | */ | 2177 | */ |
2176 | if (pr_holder && | 2178 | if (pr_holder && |
2177 | (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || | 2179 | (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || |
2178 | pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { | 2180 | type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { |
2179 | list_for_each_entry(pr_reg_p, | 2181 | list_for_each_entry(pr_reg_p, |
2180 | &pr_tmpl->registration_list, | 2182 | &pr_tmpl->registration_list, |
2181 | pr_reg_list) { | 2183 | pr_reg_list) { |
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, | |||
2194 | ret = core_scsi3_update_and_write_aptpl(dev, aptpl); | 2196 | ret = core_scsi3_update_and_write_aptpl(dev, aptpl); |
2195 | 2197 | ||
2196 | out: | 2198 | out: |
2197 | core_scsi3_put_pr_reg(pr_reg); | 2199 | if (pr_reg) |
2200 | core_scsi3_put_pr_reg(pr_reg); | ||
2198 | return ret; | 2201 | return ret; |
2199 | } | 2202 | } |
2200 | 2203 | ||
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index fa3cae393e13..a4489444ffbc 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -1074,12 +1074,19 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1074 | struct scatterlist *psg; | 1074 | struct scatterlist *psg; |
1075 | void *paddr, *addr; | 1075 | void *paddr, *addr; |
1076 | unsigned int i, len, left; | 1076 | unsigned int i, len, left; |
1077 | unsigned int offset = 0; | ||
1077 | 1078 | ||
1078 | left = sectors * dev->prot_length; | 1079 | left = sectors * dev->prot_length; |
1079 | 1080 | ||
1080 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { | 1081 | for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { |
1081 | 1082 | ||
1082 | len = min(psg->length, left); | 1083 | len = min(psg->length, left); |
1084 | if (offset >= sg->length) { | ||
1085 | sg = sg_next(sg); | ||
1086 | offset = 0; | ||
1087 | sg_off = sg->offset; | ||
1088 | } | ||
1089 | |||
1083 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; | 1090 | paddr = kmap_atomic(sg_page(psg)) + psg->offset; |
1084 | addr = kmap_atomic(sg_page(sg)) + sg_off; | 1091 | addr = kmap_atomic(sg_page(sg)) + sg_off; |
1085 | 1092 | ||
@@ -1089,6 +1096,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, | |||
1089 | memcpy(addr, paddr, len); | 1096 | memcpy(addr, paddr, len); |
1090 | 1097 | ||
1091 | left -= len; | 1098 | left -= len; |
1099 | offset += len; | ||
1092 | kunmap_atomic(paddr); | 1100 | kunmap_atomic(paddr); |
1093 | kunmap_atomic(addr); | 1101 | kunmap_atomic(addr); |
1094 | } | 1102 | } |
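The added offset/sg_next() bookkeeping lets a protection-information copy spill from one destination scatterlist entry into the next instead of restarting at sg_off on every iteration. The same idea, reduced to plain buffer segments as a stand-in for struct scatterlist, is sketched below:

#include <stddef.h>
#include <string.h>

struct seg {			/* simplified stand-in for a scatterlist entry */
	unsigned char *buf;
	size_t len;
};

/*
 * Copy each source chunk into a list of destination segments, keeping a
 * running offset and moving to the next destination segment once the
 * current one is full.  Like the patched loop, this assumes each chunk
 * fits within the remaining space of a single segment.
 */
static void copy_chunks(const struct seg *src, size_t nsrc, struct seg *dst)
{
	size_t offset = 0;	/* offset into the current dst segment */
	size_t i;

	for (i = 0; i < nsrc; i++) {
		if (offset >= dst->len) {	/* current segment exhausted */
			dst++;
			offset = 0;
		}
		memcpy(dst->buf + offset, src[i].buf, src[i].len);
		offset += src[i].len;
	}
}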
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 43c5ca9878bc..3bebc71ea033 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -440,8 +440,8 @@ check_scsi_name: | |||
440 | padding = ((-scsi_target_len) & 3); | 440 | padding = ((-scsi_target_len) & 3); |
441 | if (padding) | 441 | if (padding) |
442 | scsi_target_len += padding; | 442 | scsi_target_len += padding; |
443 | if (scsi_name_len > 256) | 443 | if (scsi_target_len > 256) |
444 | scsi_name_len = 256; | 444 | scsi_target_len = 256; |
445 | 445 | ||
446 | buf[off-1] = scsi_target_len; | 446 | buf[off-1] = scsi_target_len; |
447 | off += scsi_target_len; | 447 | off += scsi_target_len; |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c50fd9f11aab..24b4f65d8777 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
669 | return; | 669 | return; |
670 | } | 670 | } |
671 | 671 | ||
672 | if (!success) | ||
673 | cmd->transport_state |= CMD_T_FAILED; | ||
674 | |||
675 | /* | 672 | /* |
676 | * Check for case where an explicit ABORT_TASK has been received | 673 | * Check for case where an explicit ABORT_TASK has been received |
677 | * and transport_wait_for_tasks() will be waiting for completion.. | 674 | * and transport_wait_for_tasks() will be waiting for completion.. |
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
681 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 678 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
682 | complete(&cmd->t_transport_stop_comp); | 679 | complete(&cmd->t_transport_stop_comp); |
683 | return; | 680 | return; |
684 | } else if (cmd->transport_state & CMD_T_FAILED) { | 681 | } else if (!success) { |
685 | INIT_WORK(&cmd->work, target_complete_failure_work); | 682 | INIT_WORK(&cmd->work, target_complete_failure_work); |
686 | } else { | 683 | } else { |
687 | INIT_WORK(&cmd->work, target_complete_ok_work); | 684 | INIT_WORK(&cmd->work, target_complete_ok_work); |
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c index 6496872e2e47..b01659bd4f7c 100644 --- a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c | |||
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void) | |||
255 | /* Register as a vio device to receive callbacks */ | 255 | /* Register as a vio device to receive callbacks */ |
256 | return platform_driver_register(&hvc_opal_driver); | 256 | return platform_driver_register(&hvc_opal_driver); |
257 | } | 257 | } |
258 | module_init(hvc_opal_init); | 258 | device_initcall(hvc_opal_init); |
259 | |||
260 | static void __exit hvc_opal_exit(void) | ||
261 | { | ||
262 | platform_driver_unregister(&hvc_opal_driver); | ||
263 | } | ||
264 | module_exit(hvc_opal_exit); | ||
265 | 259 | ||
266 | static void udbg_opal_putc(char c) | 260 | static void udbg_opal_putc(char c) |
267 | { | 261 | { |
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c index 0069bb86ba49..08c87920b74a 100644 --- a/drivers/tty/hvc/hvc_rtas.c +++ b/drivers/tty/hvc/hvc_rtas.c | |||
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void) | |||
102 | 102 | ||
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
105 | module_init(hvc_rtas_init); | 105 | device_initcall(hvc_rtas_init); |
106 | |||
107 | /* This will tear down the tty portion of the driver */ | ||
108 | static void __exit hvc_rtas_exit(void) | ||
109 | { | ||
110 | /* Really the fun isn't over until the worker thread breaks down and | ||
111 | * the tty cleans up */ | ||
112 | if (hvc_rtas_dev) | ||
113 | hvc_remove(hvc_rtas_dev); | ||
114 | } | ||
115 | module_exit(hvc_rtas_exit); | ||
116 | 106 | ||
117 | /* This will happen prior to module init. There is no tty at this time? */ | 107 | /* This will happen prior to module init. There is no tty at this time? */ |
118 | static int __init hvc_rtas_console_init(void) | 108 | static int __init hvc_rtas_console_init(void) |
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c index 72228276fe31..9cf573d06a29 100644 --- a/drivers/tty/hvc/hvc_udbg.c +++ b/drivers/tty/hvc/hvc_udbg.c | |||
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void) | |||
80 | 80 | ||
81 | return 0; | 81 | return 0; |
82 | } | 82 | } |
83 | module_init(hvc_udbg_init); | 83 | device_initcall(hvc_udbg_init); |
84 | |||
85 | static void __exit hvc_udbg_exit(void) | ||
86 | { | ||
87 | if (hvc_udbg_dev) | ||
88 | hvc_remove(hvc_udbg_dev); | ||
89 | } | ||
90 | module_exit(hvc_udbg_exit); | ||
91 | 84 | ||
92 | static int __init hvc_udbg_console_init(void) | 85 | static int __init hvc_udbg_console_init(void) |
93 | { | 86 | { |
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 636c9baad7a5..2dc2831840ca 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void) | |||
561 | #endif | 561 | #endif |
562 | return r; | 562 | return r; |
563 | } | 563 | } |
564 | 564 | device_initcall(xen_hvc_init); | |
565 | static void __exit xen_hvc_fini(void) | ||
566 | { | ||
567 | struct xencons_info *entry, *next; | ||
568 | |||
569 | if (list_empty(&xenconsoles)) | ||
570 | return; | ||
571 | |||
572 | list_for_each_entry_safe(entry, next, &xenconsoles, list) { | ||
573 | xen_console_remove(entry); | ||
574 | } | ||
575 | } | ||
576 | 565 | ||
577 | static int xen_cons_init(void) | 566 | static int xen_cons_init(void) |
578 | { | 567 | { |
@@ -598,10 +587,6 @@ static int xen_cons_init(void) | |||
598 | hvc_instantiate(HVC_COOKIE, 0, ops); | 587 | hvc_instantiate(HVC_COOKIE, 0, ops); |
599 | return 0; | 588 | return 0; |
600 | } | 589 | } |
601 | |||
602 | |||
603 | module_init(xen_hvc_init); | ||
604 | module_exit(xen_hvc_fini); | ||
605 | console_initcall(xen_cons_init); | 590 | console_initcall(xen_cons_init); |
606 | 591 | ||
607 | #ifdef CONFIG_EARLY_PRINTK | 592 | #ifdef CONFIG_EARLY_PRINTK |
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index f34461c5f14e..2ebe47b78a3e 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c | |||
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen) | |||
1090 | { | 1090 | { |
1091 | unsigned int addr = 0; | 1091 | unsigned int addr = 0; |
1092 | unsigned int modem = 0; | 1092 | unsigned int modem = 0; |
1093 | unsigned int brk = 0; | ||
1093 | struct gsm_dlci *dlci; | 1094 | struct gsm_dlci *dlci; |
1094 | int len = clen; | 1095 | int len = clen; |
1095 | u8 *dp = data; | 1096 | u8 *dp = data; |
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen) | |||
1116 | if (len == 0) | 1117 | if (len == 0) |
1117 | return; | 1118 | return; |
1118 | } | 1119 | } |
1120 | len--; | ||
1121 | if (len > 0) { | ||
1122 | while (gsm_read_ea(&brk, *dp++) == 0) { | ||
1123 | len--; | ||
1124 | if (len == 0) | ||
1125 | return; | ||
1126 | } | ||
1127 | modem <<= 7; | ||
1128 | modem |= (brk & 0x7f); | ||
1129 | } | ||
1119 | tty = tty_port_tty_get(&dlci->port); | 1130 | tty = tty_port_tty_get(&dlci->port); |
1120 | gsm_process_modem(tty, dlci, modem, clen); | 1131 | gsm_process_modem(tty, dlci, modem, clen); |
1121 | if (tty) { | 1132 | if (tty) { |
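The new block consumes one more EA-encoded octet (the break signal) from the TS 27.010 Modem Status message: each octet carries seven data bits, and bit 0, the extension bit, marks the final octet. A standalone decoder for that encoding, written independently of the driver's gsm_read_ea() helper, could look like:

#include <stddef.h>
#include <stdint.h>

/*
 * Decode one EA (extension bit) encoded field from a TS 27.010 control
 * message: the upper seven bits of each octet carry data, and bit 0 set
 * marks the last octet.  Returns the number of octets consumed, or 0 if
 * the field is not terminated within 'len' octets.
 */
static size_t ea_decode(const uint8_t *p, size_t len, unsigned int *val)
{
	size_t used = 0;

	*val = 0;
	while (used < len) {
		uint8_t c = p[used++];

		*val = (*val << 7) | (c >> 1);
		if (c & 0x01)		/* EA bit set: final octet */
			return used;
	}
	return 0;			/* field truncated */
}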
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cb8017aa4434..d15624c1b751 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty) | |||
817 | struct n_tty_data *ldata = tty->disc_data; | 817 | struct n_tty_data *ldata = tty->disc_data; |
818 | size_t echoed; | 818 | size_t echoed; |
819 | 819 | ||
820 | if ((!L_ECHO(tty) && !L_ECHONL(tty)) || | 820 | if (ldata->echo_mark == ldata->echo_tail) |
821 | ldata->echo_mark == ldata->echo_tail) | ||
822 | return; | 821 | return; |
823 | 822 | ||
824 | mutex_lock(&ldata->output_lock); | 823 | mutex_lock(&ldata->output_lock); |
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c) | |||
1244 | if (L_ECHO(tty)) { | 1243 | if (L_ECHO(tty)) { |
1245 | echo_char(c, tty); | 1244 | echo_char(c, tty); |
1246 | commit_echoes(tty); | 1245 | commit_echoes(tty); |
1247 | } | 1246 | } else |
1247 | process_echoes(tty); | ||
1248 | isig(signal, tty); | 1248 | isig(signal, tty); |
1249 | return; | 1249 | return; |
1250 | } | 1250 | } |
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) | |||
1274 | if (I_IXON(tty)) { | 1274 | if (I_IXON(tty)) { |
1275 | if (c == START_CHAR(tty)) { | 1275 | if (c == START_CHAR(tty)) { |
1276 | start_tty(tty); | 1276 | start_tty(tty); |
1277 | commit_echoes(tty); | 1277 | process_echoes(tty); |
1278 | return 0; | 1278 | return 0; |
1279 | } | 1279 | } |
1280 | if (c == STOP_CHAR(tty)) { | 1280 | if (c == STOP_CHAR(tty)) { |
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1820 | * Fix tty hang when I_IXON(tty) is cleared, but the tty | 1820 | * Fix tty hang when I_IXON(tty) is cleared, but the tty |
1821 | * been stopped by STOP_CHAR(tty) before it. | 1821 | * been stopped by STOP_CHAR(tty) before it. |
1822 | */ | 1822 | */ |
1823 | if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) | 1823 | if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { |
1824 | start_tty(tty); | 1824 | start_tty(tty); |
1825 | process_echoes(tty); | ||
1826 | } | ||
1825 | 1827 | ||
1826 | /* The termios change make the tty ready for I/O */ | 1828 | /* The termios change make the tty ready for I/O */ |
1827 | if (waitqueue_active(&tty->write_wait)) | 1829 | if (waitqueue_active(&tty->write_wait)) |
@@ -1896,7 +1898,7 @@ err: | |||
1896 | static inline int input_available_p(struct tty_struct *tty, int poll) | 1898 | static inline int input_available_p(struct tty_struct *tty, int poll) |
1897 | { | 1899 | { |
1898 | struct n_tty_data *ldata = tty->disc_data; | 1900 | struct n_tty_data *ldata = tty->disc_data; |
1899 | int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1; | 1901 | int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1; |
1900 | 1902 | ||
1901 | if (ldata->icanon && !L_EXTPROC(tty)) { | 1903 | if (ldata->icanon && !L_EXTPROC(tty)) { |
1902 | if (ldata->canon_head != ldata->read_tail) | 1904 | if (ldata->canon_head != ldata->read_tail) |
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 61ecd709a722..69932b7556cf 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, | |||
2433 | serial_dl_write(up, quot); | 2433 | serial_dl_write(up, quot); |
2434 | 2434 | ||
2435 | /* | 2435 | /* |
2436 | * XR17V35x UARTs have an extra fractional divisor register (DLD) | ||
2437 | * | ||
2438 | * We need to recalculate all of the registers, because DLM and DLL | ||
2439 | * are already rounded to a whole integer. | ||
2440 | * | ||
2441 | * When recalculating we use a 32x clock instead of a 16x clock to | ||
2442 | * allow 1-bit for rounding in the fractional part. | ||
2443 | */ | ||
2444 | if (up->port.type == PORT_XR17V35X) { | ||
2445 | unsigned int baud_x32 = (port->uartclk * 2) / baud; | ||
2446 | u16 quot = baud_x32 / 32; | ||
2447 | u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2); | ||
2448 | |||
2449 | serial_dl_write(up, quot); | ||
2450 | serial_port_out(port, 0x2, quot_frac & 0xf); | ||
2451 | } | ||
2452 | |||
2453 | /* | ||
2436 | * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR | 2454 | * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR |
2437 | * is written without DLAB set, this mode will be disabled. | 2455 | * is written without DLAB set, this mode will be disabled. |
2438 | */ | 2456 | */ |
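The 32x reference keeps one extra bit for rounding the fractional divisor that ends up in DLD[3:0]. As a worked example with assumed values (a 62.5 MHz uartclk and 115200 baud, chosen purely for illustration):

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; not claimed to match any real board. */
	unsigned int uartclk = 62500000, baud = 115200;
	unsigned int baud_x32 = (uartclk * 2) / baud;	  /* 32x reference: 1085 */
	unsigned int quot = baud_x32 / 32;		  /* whole divisor: 33 -> DLL/DLM */
	unsigned int quot_frac = (baud_x32 % 32 + 1) / 2; /* rounded 16ths: 15 -> DLD[3:0] */

	printf("DLL/DLM = %u, DLD fraction = %u/16\n", quot, quot_frac);
	printf("effective baud = %u\n", uartclk / (quot * 16 + quot_frac));
	return 0;
}

With only the whole-number divisor the port would run at roughly 118371 baud (about 2.8% fast); the 15/16 fraction brings it to about 115101 baud, within 0.1% of the target.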
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index faa64e646100..ed3113576740 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev) | |||
391 | return 0; | 391 | return 0; |
392 | } | 392 | } |
393 | 393 | ||
394 | #ifdef CONFIG_PM | 394 | #ifdef CONFIG_PM_SLEEP |
395 | static int dw8250_suspend(struct device *dev) | 395 | static int dw8250_suspend(struct device *dev) |
396 | { | 396 | { |
397 | struct dw8250_data *data = dev_get_drvdata(dev); | 397 | struct dw8250_data *data = dev_get_drvdata(dev); |
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev) | |||
409 | 409 | ||
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | #endif /* CONFIG_PM */ | 412 | #endif /* CONFIG_PM_SLEEP */ |
413 | 413 | ||
414 | #ifdef CONFIG_PM_RUNTIME | 414 | #ifdef CONFIG_PM_RUNTIME |
415 | static int dw8250_runtime_suspend(struct device *dev) | 415 | static int dw8250_runtime_suspend(struct device *dev) |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 50228eed3b6f..0ff3e3624d4c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv, | |||
783 | { | 783 | { |
784 | unsigned int bar; | 784 | unsigned int bar; |
785 | 785 | ||
786 | if ((priv->dev->subsystem_device & 0xff00) == 0x3000) { | 786 | if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) && |
787 | (priv->dev->subsystem_device & 0xff00) == 0x3000) { | ||
787 | /* netmos apparently orders BARs by datasheet layout, so serial | 788 | /* netmos apparently orders BARs by datasheet layout, so serial |
788 | * ports get BARs 0 and 3 (or 1 and 4 for memmapped) | 789 | * ports get BARs 0 and 3 (or 1 and 4 for memmapped) |
789 | */ | 790 | */ |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index fa511ebab67c..77f035158d6c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port) | |||
738 | return retval; | 738 | return retval; |
739 | } | 739 | } |
740 | disable_irq(up->wakeirq); | 740 | disable_irq(up->wakeirq); |
741 | } else { | ||
742 | dev_info(up->port.dev, "no wakeirq for uart%d\n", | ||
743 | up->port.line); | ||
744 | } | 741 | } |
745 | 742 | ||
746 | dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); | 743 | dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); |
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up, | |||
1604 | flags & SER_RS485_RTS_AFTER_SEND); | 1601 | flags & SER_RS485_RTS_AFTER_SEND); |
1605 | if (ret < 0) | 1602 | if (ret < 0) |
1606 | return ret; | 1603 | return ret; |
1607 | } else | 1604 | } else if (up->rts_gpio == -EPROBE_DEFER) { |
1605 | return -EPROBE_DEFER; | ||
1606 | } else { | ||
1608 | up->rts_gpio = -EINVAL; | 1607 | up->rts_gpio = -EINVAL; |
1608 | } | ||
1609 | 1609 | ||
1610 | if (of_property_read_u32_array(np, "rs485-rts-delay", | 1610 | if (of_property_read_u32_array(np, "rs485-rts-delay", |
1611 | rs485_delay, 2) == 0) { | 1611 | rs485_delay, 2) == 0) { |
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev) | |||
1687 | up->port.iotype = UPIO_MEM; | 1687 | up->port.iotype = UPIO_MEM; |
1688 | up->port.irq = uartirq; | 1688 | up->port.irq = uartirq; |
1689 | up->wakeirq = wakeirq; | 1689 | up->wakeirq = wakeirq; |
1690 | if (!up->wakeirq) | ||
1691 | dev_info(up->port.dev, "no wakeirq for uart%d\n", | ||
1692 | up->port.line); | ||
1690 | 1693 | ||
1691 | up->port.regshift = 2; | 1694 | up->port.regshift = 2; |
1692 | up->port.fifosize = 64; | 1695 | up->port.fifosize = 64; |
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c index 49a2ffd101a7..b7bfe24d4ebc 100644 --- a/drivers/tty/serial/sirfsoc_uart.c +++ b/drivers/tty/serial/sirfsoc_uart.c | |||
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param) | |||
542 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, | 542 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, |
543 | rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | | 543 | rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | |
544 | SIRFUART_IO_MODE); | 544 | SIRFUART_IO_MODE); |
545 | sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); | ||
546 | spin_unlock_irqrestore(&sirfport->rx_lock, flags); | 545 | spin_unlock_irqrestore(&sirfport->rx_lock, flags); |
546 | spin_lock(&port->lock); | ||
547 | sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); | ||
548 | spin_unlock(&port->lock); | ||
547 | if (sirfport->rx_io_count == 4) { | 549 | if (sirfport->rx_io_count == 4) { |
548 | spin_lock_irqsave(&sirfport->rx_lock, flags); | 550 | spin_lock_irqsave(&sirfport->rx_lock, flags); |
549 | sirfport->rx_io_count = 0; | 551 | sirfport->rx_io_count = 0; |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c74a00ad7add..bd2715a9d8e5 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1267,16 +1267,17 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) | |||
1267 | * @p: output buffer of at least 7 bytes | 1267 | * @p: output buffer of at least 7 bytes |
1268 | * | 1268 | * |
1269 | * Generate a name from a driver reference and write it to the output | 1269 | * Generate a name from a driver reference and write it to the output |
1270 | * buffer. | 1270 | * buffer. Return the number of bytes written. |
1271 | * | 1271 | * |
1272 | * Locking: None | 1272 | * Locking: None |
1273 | */ | 1273 | */ |
1274 | static void tty_line_name(struct tty_driver *driver, int index, char *p) | 1274 | static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) |
1275 | { | 1275 | { |
1276 | if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) | 1276 | if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) |
1277 | strcpy(p, driver->name); | 1277 | return sprintf(p, "%s", driver->name); |
1278 | else | 1278 | else |
1279 | sprintf(p, "%s%d", driver->name, index + driver->name_base); | 1279 | return sprintf(p, "%s%d", driver->name, |
1280 | index + driver->name_base); | ||
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | /** | 1283 | /** |
@@ -3545,9 +3546,19 @@ static ssize_t show_cons_active(struct device *dev, | |||
3545 | if (i >= ARRAY_SIZE(cs)) | 3546 | if (i >= ARRAY_SIZE(cs)) |
3546 | break; | 3547 | break; |
3547 | } | 3548 | } |
3548 | while (i--) | 3549 | while (i--) { |
3549 | count += sprintf(buf + count, "%s%d%c", | 3550 | struct tty_driver *driver; |
3550 | cs[i]->name, cs[i]->index, i ? ' ':'\n'); | 3551 | const char *name = cs[i]->name; |
3552 | int index = cs[i]->index; | ||
3553 | |||
3554 | driver = cs[i]->device(cs[i], &index); | ||
3555 | if (driver) { | ||
3556 | count += tty_line_name(driver, index, buf + count); | ||
3557 | count += sprintf(buf + count, "%c", i ? ' ':'\n'); | ||
3558 | } else | ||
3559 | count += sprintf(buf + count, "%s%d%c", | ||
3560 | name, index, i ? ' ':'\n'); | ||
3561 | } | ||
3551 | console_unlock(); | 3562 | console_unlock(); |
3552 | 3563 | ||
3553 | return count; | 3564 | return count; |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 61b1137d7e56..23b5d32954bf 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar) | |||
1164 | scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, | 1164 | scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, |
1165 | vc->vc_screenbuf_size >> 1); | 1165 | vc->vc_screenbuf_size >> 1); |
1166 | set_origin(vc); | 1166 | set_origin(vc); |
1167 | if (CON_IS_VISIBLE(vc)) | ||
1168 | update_screen(vc); | ||
1167 | /* fall through */ | 1169 | /* fall through */ |
1168 | case 2: /* erase whole display */ | 1170 | case 2: /* erase whole display */ |
1169 | count = vc->vc_cols * vc->vc_rows; | 1171 | count = vc->vc_cols * vc->vc_rows; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 5d01558cef66..ab90a0156828 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
63 | dynid->id.idProduct = idProduct; | 63 | dynid->id.idProduct = idProduct; |
64 | dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; | 64 | dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; |
65 | if (fields > 2 && bInterfaceClass) { | 65 | if (fields > 2 && bInterfaceClass) { |
66 | if (bInterfaceClass > 255) | 66 | if (bInterfaceClass > 255) { |
67 | return -EINVAL; | 67 | retval = -EINVAL; |
68 | goto fail; | ||
69 | } | ||
68 | 70 | ||
69 | dynid->id.bInterfaceClass = (u8)bInterfaceClass; | 71 | dynid->id.bInterfaceClass = (u8)bInterfaceClass; |
70 | dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; | 72 | dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; |
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
73 | if (fields > 4) { | 75 | if (fields > 4) { |
74 | const struct usb_device_id *id = id_table; | 76 | const struct usb_device_id *id = id_table; |
75 | 77 | ||
76 | if (!id) | 78 | if (!id) { |
77 | return -ENODEV; | 79 | retval = -ENODEV; |
80 | goto fail; | ||
81 | } | ||
78 | 82 | ||
79 | for (; id->match_flags; id++) | 83 | for (; id->match_flags; id++) |
80 | if (id->idVendor == refVendor && id->idProduct == refProduct) | 84 | if (id->idVendor == refVendor && id->idProduct == refProduct) |
81 | break; | 85 | break; |
82 | 86 | ||
83 | if (id->match_flags) | 87 | if (id->match_flags) { |
84 | dynid->id.driver_info = id->driver_info; | 88 | dynid->id.driver_info = id->driver_info; |
85 | else | 89 | } else { |
86 | return -ENODEV; | 90 | retval = -ENODEV; |
91 | goto fail; | ||
92 | } | ||
87 | } | 93 | } |
88 | 94 | ||
89 | spin_lock(&dynids->lock); | 95 | spin_lock(&dynids->lock); |
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, | |||
95 | if (retval) | 101 | if (retval) |
96 | return retval; | 102 | return retval; |
97 | return count; | 103 | return count; |
104 | |||
105 | fail: | ||
106 | kfree(dynid); | ||
107 | return retval; | ||
98 | } | 108 | } |
99 | EXPORT_SYMBOL_GPL(usb_store_new_id); | 109 | EXPORT_SYMBOL_GPL(usb_store_new_id); |
100 | 110 | ||
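The reworked error paths above all funnel through a single fail label so the dynid allocated at the top of the function is freed on every early exit. The same allocate/validate/cleanup shape, with hypothetical names and plain libc calls, looks like:

#include <stdlib.h>
#include <string.h>

struct entry {
	char *name;
};

/* Hypothetical example of the single-exit cleanup pattern. */
static int add_entry(const char *name, struct entry **out)
{
	int ret;
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;

	if (!name || !*name) {		/* validation failure */
		ret = -2;
		goto fail;
	}
	e->name = strdup(name);
	if (!e->name) {
		ret = -1;
		goto fail;
	}
	*out = e;
	return 0;

fail:
	free(e);			/* every error path frees the allocation */
	return ret;
}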
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 199aaea6bfe0..2518c3250750 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd) | |||
1032 | dev_name(&usb_dev->dev), retval); | 1032 | dev_name(&usb_dev->dev), retval); |
1033 | return retval; | 1033 | return retval; |
1034 | } | 1034 | } |
1035 | usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); | ||
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | retval = usb_new_device (usb_dev); | 1037 | retval = usb_new_device (usb_dev); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index babba885978d..64ea21971be2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) | |||
128 | return usb_get_intfdata(hdev->actconfig->interface[0]); | 128 | return usb_get_intfdata(hdev->actconfig->interface[0]); |
129 | } | 129 | } |
130 | 130 | ||
131 | int usb_device_supports_lpm(struct usb_device *udev) | 131 | static int usb_device_supports_lpm(struct usb_device *udev) |
132 | { | 132 | { |
133 | /* USB 2.1 (and greater) devices indicate LPM support through | 133 | /* USB 2.1 (and greater) devices indicate LPM support through |
134 | * their USB 2.0 Extended Capabilities BOS descriptor. | 134 | * their USB 2.0 Extended Capabilities BOS descriptor. |
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev) | |||
149 | "Power management will be impacted.\n"); | 149 | "Power management will be impacted.\n"); |
150 | return 0; | 150 | return 0; |
151 | } | 151 | } |
152 | |||
153 | /* udev is root hub */ | ||
154 | if (!udev->parent) | ||
155 | return 1; | ||
156 | |||
157 | if (udev->parent->lpm_capable) | 152 | if (udev->parent->lpm_capable) |
158 | return 1; | 153 | return 1; |
159 | 154 | ||
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index c49383669cd8..823857767a16 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h | |||
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev, | |||
35 | unsigned int size); | 35 | unsigned int size); |
36 | extern int usb_get_bos_descriptor(struct usb_device *dev); | 36 | extern int usb_get_bos_descriptor(struct usb_device *dev); |
37 | extern void usb_release_bos_descriptor(struct usb_device *dev); | 37 | extern void usb_release_bos_descriptor(struct usb_device *dev); |
38 | extern int usb_device_supports_lpm(struct usb_device *udev); | ||
39 | extern char *usb_cache_string(struct usb_device *udev, int index); | 38 | extern char *usb_cache_string(struct usb_device *udev, int index); |
40 | extern int usb_set_configuration(struct usb_device *dev, int configuration); | 39 | extern int usb_set_configuration(struct usb_device *dev, int configuration); |
41 | extern int usb_choose_configuration(struct usb_device *udev); | 40 | extern int usb_choose_configuration(struct usb_device *udev); |
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 8565d87f94b4..1d129884cc39 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c | |||
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) | |||
216 | int retval = 0; | 216 | int retval = 0; |
217 | 217 | ||
218 | if (!select_phy) | 218 | if (!select_phy) |
219 | return -ENODEV; | 219 | return 0; |
220 | 220 | ||
221 | usbcfg = readl(hsotg->regs + GUSBCFG); | 221 | usbcfg = readl(hsotg->regs + GUSBCFG); |
222 | 222 | ||
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f59484d43b35..4d918ed8d343 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c | |||
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd, | |||
2565 | struct usb_host_endpoint *ep) | 2565 | struct usb_host_endpoint *ep) |
2566 | { | 2566 | { |
2567 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); | 2567 | struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); |
2568 | int is_control = usb_endpoint_xfer_control(&ep->desc); | ||
2569 | int is_out = usb_endpoint_dir_out(&ep->desc); | ||
2570 | int epnum = usb_endpoint_num(&ep->desc); | ||
2571 | struct usb_device *udev; | ||
2572 | unsigned long flags; | 2568 | unsigned long flags; |
2573 | 2569 | ||
2574 | dev_dbg(hsotg->dev, | 2570 | dev_dbg(hsotg->dev, |
2575 | "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", | 2571 | "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n", |
2576 | ep->desc.bEndpointAddress); | 2572 | ep->desc.bEndpointAddress); |
2577 | 2573 | ||
2578 | udev = to_usb_device(hsotg->dev); | ||
2579 | |||
2580 | spin_lock_irqsave(&hsotg->lock, flags); | 2574 | spin_lock_irqsave(&hsotg->lock, flags); |
2581 | |||
2582 | usb_settoggle(udev, epnum, is_out, 0); | ||
2583 | if (is_control) | ||
2584 | usb_settoggle(udev, epnum, !is_out, 0); | ||
2585 | dwc2_hcd_endpoint_reset(hsotg, ep); | 2575 | dwc2_hcd_endpoint_reset(hsotg, ep); |
2586 | |||
2587 | spin_unlock_irqrestore(&hsotg->lock, flags); | 2576 | spin_unlock_irqrestore(&hsotg->lock, flags); |
2588 | } | 2577 | } |
2589 | 2578 | ||
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index d01d0d3f2cf0..eaba547ce26b 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev) | |||
124 | int retval; | 124 | int retval; |
125 | int irq; | 125 | int irq; |
126 | 126 | ||
127 | if (usb_disabled()) | ||
128 | return -ENODEV; | ||
129 | |||
127 | match = of_match_device(dwc2_of_match_table, &dev->dev); | 130 | match = of_match_device(dwc2_of_match_table, &dev->dev); |
128 | if (match && match->data) { | 131 | if (match && match->data) { |
129 | params = match->data; | 132 | params = match->data; |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index b016d38199f2..eb009a457fb5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) | |||
203 | addr, (unsigned int)temp); | 203 | addr, (unsigned int)temp); |
204 | 204 | ||
205 | addr = &ir_set->erst_base; | 205 | addr = &ir_set->erst_base; |
206 | temp_64 = readq(addr); | 206 | temp_64 = xhci_read_64(xhci, addr); |
207 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", | 207 | xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n", |
208 | addr, temp_64); | 208 | addr, temp_64); |
209 | 209 | ||
210 | addr = &ir_set->erst_dequeue; | 210 | addr = &ir_set->erst_dequeue; |
211 | temp_64 = readq(addr); | 211 | temp_64 = xhci_read_64(xhci, addr); |
212 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", | 212 | xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n", |
213 | addr, temp_64); | 213 | addr, temp_64); |
214 | } | 214 | } |
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci) | |||
412 | { | 412 | { |
413 | u64 val; | 413 | u64 val; |
414 | 414 | ||
415 | val = readq(&xhci->op_regs->cmd_ring); | 415 | val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
416 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", | 416 | xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n", |
417 | lower_32_bits(val)); | 417 | lower_32_bits(val)); |
418 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", | 418 | xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n", |
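The xhci hunks in this series route 64-bit register accesses through driver-local helpers instead of raw readq()/writeq(); the likely reason (an assumption, not stated in the hunks themselves) is that an atomic 64-bit MMIO access is not available or not safe on every platform, so the helper splits it into two 32-bit operations, low word first. A sketch of such a helper:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch only: example names, not the driver's actual accessors. */
static inline u64 example_read_64(void __iomem *addr)
{
	u64 lo = readl(addr);
	u64 hi = readl(addr + 4);

	return lo | (hi << 32);
}

static inline void example_write_64(u64 val, void __iomem *addr)
{
	writel(lower_32_bits(val), addr);
	writel(upper_32_bits(val), addr + 4);
}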
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 873c272b3ef5..bce4391a0e7d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
1958 | xhci_warn(xhci, "WARN something wrong with SW event ring " | 1958 | xhci_warn(xhci, "WARN something wrong with SW event ring " |
1959 | "dequeue ptr.\n"); | 1959 | "dequeue ptr.\n"); |
1960 | /* Update HC event ring dequeue pointer */ | 1960 | /* Update HC event ring dequeue pointer */ |
1961 | temp = readq(&xhci->ir_set->erst_dequeue); | 1961 | temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
1962 | temp &= ERST_PTR_MASK; | 1962 | temp &= ERST_PTR_MASK; |
1963 | /* Don't clear the EHB bit (which is RW1C) because | 1963 | /* Don't clear the EHB bit (which is RW1C) because |
1964 | * there might be more events to service. | 1964 | * there might be more events to service. |
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) | |||
1967 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 1967 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
1968 | "// Write event ring dequeue pointer, " | 1968 | "// Write event ring dequeue pointer, " |
1969 | "preserving EHB bit"); | 1969 | "preserving EHB bit"); |
1970 | writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp, | 1970 | xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, |
1971 | &xhci->ir_set->erst_dequeue); | 1971 | &xhci->ir_set->erst_dequeue); |
1972 | } | 1972 | } |
1973 | 1973 | ||
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2269 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2269 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2270 | "// Device context base array address = 0x%llx (DMA), %p (virt)", | 2270 | "// Device context base array address = 0x%llx (DMA), %p (virt)", |
2271 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | 2271 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); |
2272 | writeq(dma, &xhci->op_regs->dcbaa_ptr); | 2272 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
2273 | 2273 | ||
2274 | /* | 2274 | /* |
2275 | * Initialize the ring segment pool. The ring must be a contiguous | 2275 | * Initialize the ring segment pool. The ring must be a contiguous |
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2312 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | 2312 | (unsigned long long)xhci->cmd_ring->first_seg->dma); |
2313 | 2313 | ||
2314 | /* Set the address in the Command Ring Control register */ | 2314 | /* Set the address in the Command Ring Control register */ |
2315 | val_64 = readq(&xhci->op_regs->cmd_ring); | 2315 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
2316 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | 2316 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
2317 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | | 2317 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | |
2318 | xhci->cmd_ring->cycle_state; | 2318 | xhci->cmd_ring->cycle_state; |
2319 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2319 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2320 | "// Setting command ring address to 0x%x", val); | 2320 | "// Setting command ring address to 0x%x", val); |
2321 | writeq(val_64, &xhci->op_regs->cmd_ring); | 2321 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
2322 | xhci_dbg_cmd_ptrs(xhci); | 2322 | xhci_dbg_cmd_ptrs(xhci); |
2323 | 2323 | ||
2324 | xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); | 2324 | xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags); |
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
2396 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 2396 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
2397 | "// Set ERST base address for ir_set 0 = 0x%llx", | 2397 | "// Set ERST base address for ir_set 0 = 0x%llx", |
2398 | (unsigned long long)xhci->erst.erst_dma_addr); | 2398 | (unsigned long long)xhci->erst.erst_dma_addr); |
2399 | val_64 = readq(&xhci->ir_set->erst_base); | 2399 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
2400 | val_64 &= ERST_PTR_MASK; | 2400 | val_64 &= ERST_PTR_MASK; |
2401 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); | 2401 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); |
2402 | writeq(val_64, &xhci->ir_set->erst_base); | 2402 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); |
2403 | 2403 | ||
2404 | /* Set the event ring dequeue address */ | 2404 | /* Set the event ring dequeue address */ |
2405 | xhci_set_hc_event_deq(xhci); | 2405 | xhci_set_hc_event_deq(xhci); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 3c898c12a06b..04f986d9234f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
142 | "QUIRK: Resetting on resume"); | 142 | "QUIRK: Resetting on resume"); |
143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 143 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
144 | } | 144 | } |
145 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && | ||
146 | pdev->device == 0x0015 && | ||
147 | pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && | ||
148 | pdev->subsystem_device == 0xc0cd) | ||
149 | xhci->quirks |= XHCI_RESET_ON_RESUME; | ||
145 | if (pdev->vendor == PCI_VENDOR_ID_VIA) | 150 | if (pdev->vendor == PCI_VENDOR_ID_VIA) |
146 | xhci->quirks |= XHCI_RESET_ON_RESUME; | 151 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
147 | } | 152 | } |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a0b248c34526..0ed64eb68e48 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | temp_64 = readq(&xhci->op_regs->cmd_ring); | 310 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
311 | if (!(temp_64 & CMD_RING_RUNNING)) { | 311 | if (!(temp_64 & CMD_RING_RUNNING)) { |
312 | xhci_dbg(xhci, "Command ring had been stopped\n"); | 312 | xhci_dbg(xhci, "Command ring had been stopped\n"); |
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; | 315 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
316 | writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring); | 316 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, |
317 | &xhci->op_regs->cmd_ring); | ||
317 | 318 | ||
318 | /* Section 4.6.1.2 of xHCI 1.0 spec says software should | 319 | /* Section 4.6.1.2 of xHCI 1.0 spec says software should |
318 | * time the completion of all xHCI commands, including | 319 | * time the completion of all xHCI commands, including |
@@ -2864,8 +2865,9 @@ hw_died: | |||
2864 | /* Clear the event handler busy flag (RW1C); | 2865 | /* Clear the event handler busy flag (RW1C); |
2865 | * the event ring should be empty. | 2866 | * the event ring should be empty. |
2866 | */ | 2867 | */ |
2867 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 2868 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2868 | writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); | 2869 | xhci_write_64(xhci, temp_64 | ERST_EHB, |
2870 | &xhci->ir_set->erst_dequeue); | ||
2869 | spin_unlock(&xhci->lock); | 2871 | spin_unlock(&xhci->lock); |
2870 | 2872 | ||
2871 | return IRQ_HANDLED; | 2873 | return IRQ_HANDLED; |
@@ -2877,7 +2879,7 @@ hw_died: | |||
2877 | */ | 2879 | */ |
2878 | while (xhci_handle_event(xhci) > 0) {} | 2880 | while (xhci_handle_event(xhci) > 0) {} |
2879 | 2881 | ||
2880 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 2882 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
2881 | /* If necessary, update the HW's version of the event ring deq ptr. */ | 2883 | /* If necessary, update the HW's version of the event ring deq ptr. */ |
2882 | if (event_ring_deq != xhci->event_ring->dequeue) { | 2884 | if (event_ring_deq != xhci->event_ring->dequeue) { |
2883 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, | 2885 | deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, |
@@ -2892,7 +2894,7 @@ hw_died: | |||
2892 | 2894 | ||
2893 | /* Clear the event handler busy flag (RW1C); event ring is empty. */ | 2895 | /* Clear the event handler busy flag (RW1C); event ring is empty. */ |
2894 | temp_64 |= ERST_EHB; | 2896 | temp_64 |= ERST_EHB; |
2895 | writeq(temp_64, &xhci->ir_set->erst_dequeue); | 2897 | xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); |
2896 | 2898 | ||
2897 | spin_unlock(&xhci->lock); | 2899 | spin_unlock(&xhci->lock); |
2898 | 2900 | ||
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, | |||
2965 | } | 2967 | } |
2966 | 2968 | ||
2967 | while (1) { | 2969 | while (1) { |
2968 | if (room_on_ring(xhci, ep_ring, num_trbs)) { | 2970 | if (room_on_ring(xhci, ep_ring, num_trbs)) |
2969 | union xhci_trb *trb = ep_ring->enqueue; | 2971 | break; |
2970 | unsigned int usable = ep_ring->enq_seg->trbs + | ||
2971 | TRBS_PER_SEGMENT - 1 - trb; | ||
2972 | u32 nop_cmd; | ||
2973 | |||
2974 | /* | ||
2975 | * Section 4.11.7.1 TD Fragments states that a link | ||
2976 | * TRB must only occur at the boundary between | ||
2977 | * data bursts (eg 512 bytes for 480M). | ||
2978 | * While it is possible to split a large fragment | ||
2979 | * we don't know the size yet. | ||
2980 | * Simplest solution is to fill the trb before the | ||
2981 | * LINK with nop commands. | ||
2982 | */ | ||
2983 | if (num_trbs == 1 || num_trbs <= usable || usable == 0) | ||
2984 | break; | ||
2985 | |||
2986 | if (ep_ring->type != TYPE_BULK) | ||
2987 | /* | ||
2988 | * While isoc transfers might have a buffer that | ||
2989 | * crosses a 64k boundary it is unlikely. | ||
2990 | * Since we can't add NOPs without generating | ||
2991 | * gaps in the traffic just hope it never | ||
2992 | * happens at the end of the ring. | ||
2993 | * This could be fixed by writing a LINK TRB | ||
2994 | * instead of the first NOP - however the | ||
2995 | * TRB_TYPE_LINK_LE32() calls would all need | ||
2996 | * changing to check the ring length. | ||
2997 | */ | ||
2998 | break; | ||
2999 | |||
3000 | if (num_trbs >= TRBS_PER_SEGMENT) { | ||
3001 | xhci_err(xhci, "Too many fragments %d, max %d\n", | ||
3002 | num_trbs, TRBS_PER_SEGMENT - 1); | ||
3003 | return -EINVAL; | ||
3004 | } | ||
3005 | |||
3006 | nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | | ||
3007 | ep_ring->cycle_state); | ||
3008 | ep_ring->num_trbs_free -= usable; | ||
3009 | do { | ||
3010 | trb->generic.field[0] = 0; | ||
3011 | trb->generic.field[1] = 0; | ||
3012 | trb->generic.field[2] = 0; | ||
3013 | trb->generic.field[3] = nop_cmd; | ||
3014 | trb++; | ||
3015 | } while (--usable); | ||
3016 | ep_ring->enqueue = trb; | ||
3017 | if (room_on_ring(xhci, ep_ring, num_trbs)) | ||
3018 | break; | ||
3019 | } | ||
3020 | 2972 | ||
3021 | if (ep_ring == xhci->cmd_ring) { | 2973 | if (ep_ring == xhci->cmd_ring) { |
3022 | xhci_err(xhci, "Do not support expand command ring\n"); | 2974 | xhci_err(xhci, "Do not support expand command ring\n"); |
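
With the TD-fragment NOP padding removed, prepare_ring() is back to a plain loop: break as soon as there is room, otherwise try to grow the ring, and give up for the command ring. A standalone model of that control flow, with stub helpers in place of the driver's ring bookkeeping:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int free, size;
	bool is_cmd_ring;
};

static bool room_on_ring(const struct ring *r, unsigned int trbs)
{
	return r->free >= trbs;
}

static bool expand_ring(struct ring *r)
{
	if (r->is_cmd_ring)
		return false;           /* command ring expansion unsupported */
	r->free += r->size;             /* pretend one more segment was linked in */
	r->size *= 2;
	return true;
}

static int prepare(struct ring *r, unsigned int trbs)
{
	for (;;) {
		if (room_on_ring(r, trbs))
			return 0;
		if (!expand_ring(r))
			return -1;      /* the driver returns an error code here */
	}
}

int main(void)
{
	struct ring ep = { .free = 3, .size = 64, .is_cmd_ring = false };
	int ret = prepare(&ep, 10);

	printf("prepare -> %d, free now %u\n", ret, ep.free);
	return 0;
}
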
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ad364394885a..6fe577d46fa2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd) | |||
611 | xhci_dbg(xhci, "Event ring:\n"); | 611 | xhci_dbg(xhci, "Event ring:\n"); |
612 | xhci_debug_ring(xhci, xhci->event_ring); | 612 | xhci_debug_ring(xhci, xhci->event_ring); |
613 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); | 613 | xhci_dbg_ring_ptrs(xhci, xhci->event_ring); |
614 | temp_64 = readq(&xhci->ir_set->erst_dequeue); | 614 | temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
615 | temp_64 &= ~ERST_PTR_MASK; | 615 | temp_64 &= ~ERST_PTR_MASK; |
616 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 616 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
617 | "ERST deq = 64'h%0lx", (long unsigned int) temp_64); | 617 | "ERST deq = 64'h%0lx", (long unsigned int) temp_64); |
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci) | |||
756 | { | 756 | { |
757 | xhci->s3.command = readl(&xhci->op_regs->command); | 757 | xhci->s3.command = readl(&xhci->op_regs->command); |
758 | xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); | 758 | xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); |
759 | xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr); | 759 | xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
760 | xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); | 760 | xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); |
761 | xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); | 761 | xhci->s3.erst_size = readl(&xhci->ir_set->erst_size); |
762 | xhci->s3.erst_base = readq(&xhci->ir_set->erst_base); | 762 | xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
763 | xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue); | 763 | xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); |
764 | xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); | 764 | xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending); |
765 | xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); | 765 | xhci->s3.irq_control = readl(&xhci->ir_set->irq_control); |
766 | } | 766 | } |
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) | |||
769 | { | 769 | { |
770 | writel(xhci->s3.command, &xhci->op_regs->command); | 770 | writel(xhci->s3.command, &xhci->op_regs->command); |
771 | writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); | 771 | writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); |
772 | writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); | 772 | xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); |
773 | writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); | 773 | writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); |
774 | writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); | 774 | writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); |
775 | writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base); | 775 | xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); |
776 | writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); | 776 | xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); |
777 | writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); | 777 | writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); |
778 | writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); | 778 | writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); |
779 | } | 779 | } |
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | |||
783 | u64 val_64; | 783 | u64 val_64; |
784 | 784 | ||
785 | /* step 2: initialize command ring buffer */ | 785 | /* step 2: initialize command ring buffer */ |
786 | val_64 = readq(&xhci->op_regs->cmd_ring); | 786 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
787 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | 787 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | |
788 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, | 788 | (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, |
789 | xhci->cmd_ring->dequeue) & | 789 | xhci->cmd_ring->dequeue) & |
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) | |||
792 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 792 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
793 | "// Setting command ring address to 0x%llx", | 793 | "// Setting command ring address to 0x%llx", |
794 | (long unsigned long) val_64); | 794 | (long unsigned long) val_64); |
795 | writeq(val_64, &xhci->op_regs->cmd_ring); | 795 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); |
796 | } | 796 | } |
797 | 797 | ||
798 | /* | 798 | /* |
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, | |||
3842 | if (ret) { | 3842 | if (ret) { |
3843 | return ret; | 3843 | return ret; |
3844 | } | 3844 | } |
3845 | temp_64 = readq(&xhci->op_regs->dcbaa_ptr); | 3845 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
3846 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3846 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
3847 | "Op regs DCBAA ptr = %#016llx", temp_64); | 3847 | "Op regs DCBAA ptr = %#016llx", temp_64); |
3848 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, | 3848 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
@@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4730 | struct device *dev = hcd->self.controller; | 4730 | struct device *dev = hcd->self.controller; |
4731 | int retval; | 4731 | int retval; |
4732 | 4732 | ||
4733 | /* Limit the block layer scatter-gather lists to half a segment. */ | 4733 | /* Accept arbitrarily long scatter-gather lists */ |
4734 | hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2; | 4734 | hcd->self.sg_tablesize = ~0; |
4735 | |||
4736 | /* support to build packet from discontinuous buffers */ | ||
4737 | hcd->self.no_sg_constraint = 1; | ||
4738 | 4735 | ||
4739 | /* XHCI controllers don't stop the ep queue on short packets :| */ | 4736 | /* XHCI controllers don't stop the ep queue on short packets :| */ |
4740 | hcd->self.no_stop_on_short = 1; | 4737 | hcd->self.no_stop_on_short = 1; |
@@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4760 | /* xHCI private pointer was set in xhci_pci_probe for the second | 4757 | /* xHCI private pointer was set in xhci_pci_probe for the second |
4761 | * registered roothub. | 4758 | * registered roothub. |
4762 | */ | 4759 | */ |
4760 | xhci = hcd_to_xhci(hcd); | ||
4761 | /* | ||
4762 | * Support arbitrarily aligned sg-list entries on hosts without | ||
4763 | * TD fragment rules (which are currently unsupported). | ||
4764 | */ | ||
4765 | if (xhci->hci_version < 0x100) | ||
4766 | hcd->self.no_sg_constraint = 1; | ||
4767 | |||
4763 | return 0; | 4768 | return 0; |
4764 | } | 4769 | } |
4765 | 4770 | ||
@@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4788 | if (xhci->hci_version > 0x96) | 4793 | if (xhci->hci_version > 0x96) |
4789 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; | 4794 | xhci->quirks |= XHCI_SPURIOUS_SUCCESS; |
4790 | 4795 | ||
4796 | if (xhci->hci_version < 0x100) | ||
4797 | hcd->self.no_sg_constraint = 1; | ||
4798 | |||
4791 | /* Make sure the HC is halted. */ | 4799 | /* Make sure the HC is halted. */ |
4792 | retval = xhci_halt(xhci); | 4800 | retval = xhci_halt(xhci); |
4793 | if (retval) | 4801 | if (retval) |
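
The setup hunks above restore the half-segment scatter-gather limit and make no_sg_constraint conditional on the controller revision, since only pre-1.0 hosts are free of TD fragment rules. A quick standalone check of both decisions, with the TRBS_PER_SEGMENT value taken from the header change below:

#include <stdbool.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 64

/* pre-1.0 hosts have no TD fragment rules, so arbitrary sg alignment is fine */
static bool no_sg_constraint(unsigned int hci_version)
{
	return hci_version < 0x100;
}

int main(void)
{
	printf("sg_tablesize = %d\n", TRBS_PER_SEGMENT / 2);  /* 32 entries */
	printf("0.96 host: no_sg_constraint = %d\n", no_sg_constraint(0x96));
	printf("1.00 host: no_sg_constraint = %d\n", no_sg_constraint(0x100));
	return 0;
}
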
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f8416639bf31..58ed9d088e63 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -28,17 +28,6 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/usb/hcd.h> | 29 | #include <linux/usb/hcd.h> |
30 | 30 | ||
31 | /* | ||
32 | * Registers should always be accessed with double word or quad word accesses. | ||
33 | * | ||
34 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
35 | * with 64-bit address pointers should be written to with dword accesses by | ||
36 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
37 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
38 | * the high dword, and write order is irrelevant. | ||
39 | */ | ||
40 | #include <asm-generic/io-64-nonatomic-lo-hi.h> | ||
41 | |||
42 | /* Code sharing between pci-quirks and xhci hcd */ | 31 | /* Code sharing between pci-quirks and xhci hcd */ |
43 | #include "xhci-ext-caps.h" | 32 | #include "xhci-ext-caps.h" |
44 | #include "pci-quirks.h" | 33 | #include "pci-quirks.h" |
@@ -1279,7 +1268,7 @@ union xhci_trb { | |||
1279 | * since the command ring is 64-byte aligned. | 1268 | * since the command ring is 64-byte aligned. |
1280 | * It must also be greater than 16. | 1269 | * It must also be greater than 16. |
1281 | */ | 1270 | */ |
1282 | #define TRBS_PER_SEGMENT 256 | 1271 | #define TRBS_PER_SEGMENT 64 |
1283 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ | 1272 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ |
1284 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) | 1273 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) |
1285 | #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) | 1274 | #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) |
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) | |||
1614 | #define xhci_warn_ratelimited(xhci, fmt, args...) \ | 1603 | #define xhci_warn_ratelimited(xhci, fmt, args...) \ |
1615 | dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) | 1604 | dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) |
1616 | 1605 | ||
1606 | /* | ||
1607 | * Registers should always be accessed with double word or quad word accesses. | ||
1608 | * | ||
1609 | * Some xHCI implementations may support 64-bit address pointers. Registers | ||
1610 | * with 64-bit address pointers should be written to with dword accesses by | ||
1611 | * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second. | ||
1612 | * xHCI implementations that do not support 64-bit address pointers will ignore | ||
1613 | * the high dword, and write order is irrelevant. | ||
1614 | */ | ||
1615 | static inline u64 xhci_read_64(const struct xhci_hcd *xhci, | ||
1616 | __le64 __iomem *regs) | ||
1617 | { | ||
1618 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1619 | u64 val_lo = readl(ptr); | ||
1620 | u64 val_hi = readl(ptr + 1); | ||
1621 | return val_lo + (val_hi << 32); | ||
1622 | } | ||
1623 | static inline void xhci_write_64(struct xhci_hcd *xhci, | ||
1624 | const u64 val, __le64 __iomem *regs) | ||
1625 | { | ||
1626 | __u32 __iomem *ptr = (__u32 __iomem *) regs; | ||
1627 | u32 val_lo = lower_32_bits(val); | ||
1628 | u32 val_hi = upper_32_bits(val); | ||
1629 | |||
1630 | writel(val_lo, ptr); | ||
1631 | writel(val_hi, ptr + 1); | ||
1632 | } | ||
1633 | |||
1617 | static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) | 1634 | static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) |
1618 | { | 1635 | { |
1619 | return xhci->quirks & XHCI_LINK_TRB_QUIRK; | 1636 | return xhci->quirks & XHCI_LINK_TRB_QUIRK; |
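
The helpers reinstated above split every 64-bit register access into two 32-bit ones, low dword first, matching the comment moved along with them. A standalone demonstration of the same split and reassembly, using plain memory in place of the MMIO window:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg[2];                 /* reg[0] = low dword, reg[1] = high dword */

static void write_64(uint64_t val)
{
	reg[0] = (uint32_t)val;         /* low dword first, as the comment requires */
	reg[1] = (uint32_t)(val >> 32);
}

static uint64_t read_64(void)
{
	uint64_t lo = reg[0];
	uint64_t hi = reg[1];

	return lo + (hi << 32);
}

int main(void)
{
	write_64(0x1122334455667788ULL);
	printf("read back 0x%llx\n", (unsigned long long)read_64());
	return 0;
}
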
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index e6f61e4361df..8afa813d690b 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c | |||
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type) | |||
130 | 130 | ||
131 | phy = __usb_find_phy(&phy_list, type); | 131 | phy = __usb_find_phy(&phy_list, type); |
132 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { | 132 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { |
133 | pr_err("unable to find transceiver of type %s\n", | 133 | pr_debug("PHY: unable to find transceiver of type %s\n", |
134 | usb_phy_type_string(type)); | 134 | usb_phy_type_string(type)); |
135 | goto err0; | 135 | goto err0; |
136 | } | 136 | } |
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index) | |||
228 | 228 | ||
229 | phy = __usb_find_phy_dev(dev, &phy_bind_list, index); | 229 | phy = __usb_find_phy_dev(dev, &phy_bind_list, index); |
230 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { | 230 | if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { |
231 | pr_err("unable to find transceiver\n"); | 231 | dev_dbg(dev, "unable to find transceiver\n"); |
232 | goto err0; | 232 | goto err0; |
233 | } | 233 | } |
234 | 234 | ||
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index, | |||
424 | unsigned long flags; | 424 | unsigned long flags; |
425 | 425 | ||
426 | phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL); | 426 | phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL); |
427 | if (!phy_bind) { | 427 | if (!phy_bind) |
428 | pr_err("phy_bind(): No memory for phy_bind"); | ||
429 | return -ENOMEM; | 428 | return -ENOMEM; |
430 | } | ||
431 | 429 | ||
432 | phy_bind->dev_name = dev_name; | 430 | phy_bind->dev_name = dev_name; |
433 | phy_bind->phy_dev_name = phy_dev_name; | 431 | phy_bind->phy_dev_name = phy_dev_name; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ce0d7b0db012..ee1f00f03c43 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
152 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, | 152 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
153 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, | 153 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
154 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, | 154 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, |
155 | { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, | ||
155 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, | 156 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
156 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, | 157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, |
157 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, | 158 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, |
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
191 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, | 192 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
192 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, | 193 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
193 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, | 194 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
195 | { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) }, | ||
196 | { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) }, | ||
194 | { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, | 197 | { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, |
195 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, | 198 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, |
196 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, | 199 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index a7019d1e3058..1e2d369df86e 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #define TI_XDS100V2_PID 0xa6d0 | 50 | #define TI_XDS100V2_PID 0xa6d0 |
51 | 51 | ||
52 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ | 52 | #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ |
53 | #define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */ | ||
53 | 54 | ||
54 | /* US Interface Navigator (http://www.usinterface.com/) */ | 55 | /* US Interface Navigator (http://www.usinterface.com/) */ |
55 | #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ | 56 | #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ |
@@ -363,6 +364,12 @@ | |||
363 | /* Sprog II (Andrew Crosland's SprogII DCC interface) */ | 364 | /* Sprog II (Andrew Crosland's SprogII DCC interface) */ |
364 | #define FTDI_SPROG_II 0xF0C8 | 365 | #define FTDI_SPROG_II 0xF0C8 |
365 | 366 | ||
367 | /* | ||
368 | * Two of the Tagsys RFID Readers | ||
369 | */ | ||
370 | #define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/ | ||
371 | #define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/ | ||
372 | |||
366 | /* an infrared receiver for user access control with IR tags */ | 373 | /* an infrared receiver for user access control with IR tags */ |
367 | #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ | 374 | #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ |
368 | 375 | ||
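
Each new product ID above comes in two parts: a #define in ftdi_sio_ids.h and a matching entry in id_table_combined[]. A standalone model of how such a table is consumed at probe time; the struct and lookup below are local to this example rather than the USB core's matching code, with the PIDs copied from the hunks and FTDI_VID being the usual FTDI vendor ID:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FTDI_VID                0x0403
#define FTDI_EV3CON_PID         0xABB9
#define FTDI_TAGSYS_LP101_PID   0xF0E9
#define FTDI_TAGSYS_P200X_PID   0xF0EE

struct usb_id { uint16_t vid, pid; };

static const struct usb_id id_table[] = {
	{ FTDI_VID, FTDI_EV3CON_PID },
	{ FTDI_VID, FTDI_TAGSYS_LP101_PID },
	{ FTDI_VID, FTDI_TAGSYS_P200X_PID },
};

static int id_matches(uint16_t vid, uint16_t pid)
{
	size_t i;

	for (i = 0; i < sizeof(id_table) / sizeof(id_table[0]); i++)
		if (id_table[i].vid == vid && id_table[i].pid == pid)
			return 1;
	return 0;
}

int main(void)
{
	printf("EV3 console adapter matched: %d\n",
	       id_matches(FTDI_VID, FTDI_EV3CON_PID));
	return 0;
}
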
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5c86f57e4afa..216d20affba8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = { | |||
1362 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, | 1362 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, |
1363 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, | 1363 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, |
1364 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, | 1364 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, |
1365 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, | 1365 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), |
1366 | .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, | ||
1366 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, | 1367 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, |
1367 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, | 1368 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, |
1368 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, | 1369 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, |
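
The blacklist attached above via driver_info tells the option driver to skip interface 5 on that ZTE device, leaving the network function for its own driver. A standalone model of the reserved-interface test, with a local bitmask struct standing in for the driver's blacklist info:

#include <stdio.h>

struct intf_blacklist {
	unsigned long reserved;         /* bitmask of interface numbers to skip */
};

static const struct intf_blacklist net_intf5_blacklist = { .reserved = 1UL << 5 };

static int should_bind(const struct intf_blacklist *bl, unsigned int ifnum)
{
	return !(bl && (bl->reserved & (1UL << ifnum)));
}

int main(void)
{
	printf("interface 0 bound: %d\n", should_bind(&net_intf5_blacklist, 0));
	printf("interface 5 bound: %d\n", should_bind(&net_intf5_blacklist, 5));
	return 0;
}
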
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c65437cfd4a2..968a40201e5f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = { | |||
139 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ | 139 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ |
140 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ | 140 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ |
141 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ | 141 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ |
142 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ | ||
143 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ | ||
144 | {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ | ||
142 | 145 | ||
143 | { } /* Terminating entry */ | 146 | { } /* Terminating entry */ |
144 | }; | 147 | }; |
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index f112b079ddfc..fb79775447b0 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c | |||
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS); | |||
71 | 71 | ||
72 | /* Suunto ANT+ USB Driver */ | 72 | /* Suunto ANT+ USB Driver */ |
73 | #define SUUNTO_IDS() \ | 73 | #define SUUNTO_IDS() \ |
74 | { USB_DEVICE(0x0fcf, 0x1008) } | 74 | { USB_DEVICE(0x0fcf, 0x1008) }, \ |
75 | { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ | ||
75 | DEVICE(suunto, SUUNTO_IDS); | 76 | DEVICE(suunto, SUUNTO_IDS); |
76 | 77 | ||
77 | /* Siemens USB/MPI adapter */ | 78 | /* Siemens USB/MPI adapter */ |
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 8470e1b114f2..1dd0604d1911 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
@@ -18,7 +18,9 @@ config USB_STORAGE | |||
18 | 18 | ||
19 | This option depends on 'SCSI' support being enabled, but you | 19 | This option depends on 'SCSI' support being enabled, but you |
20 | probably also need 'SCSI device support: SCSI disk support' | 20 | probably also need 'SCSI device support: SCSI disk support' |
21 | (BLK_DEV_SD) for most USB storage devices. | 21 | (BLK_DEV_SD) for most USB storage devices. Some devices also |
22 | will require 'Probe all LUNs on each SCSI device' | ||
23 | (SCSI_MULTI_LUN). | ||
22 | 24 | ||
23 | To compile this driver as a module, choose M here: the | 25 | To compile this driver as a module, choose M here: the |
24 | module will be called usb-storage. | 26 | module will be called usb-storage. |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 18509e6c21ab..9d38ddc8da49 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host) | |||
78 | 78 | ||
79 | static int slave_alloc (struct scsi_device *sdev) | 79 | static int slave_alloc (struct scsi_device *sdev) |
80 | { | 80 | { |
81 | struct us_data *us = host_to_us(sdev->host); | ||
82 | |||
81 | /* | 83 | /* |
82 | * Set the INQUIRY transfer length to 36. We don't use any of | 84 | * Set the INQUIRY transfer length to 36. We don't use any of |
83 | * the extra data and many devices choke if asked for more or | 85 | * the extra data and many devices choke if asked for more or |
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev) | |||
102 | */ | 104 | */ |
103 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); | 105 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); |
104 | 106 | ||
107 | /* Tell the SCSI layer if we know there is more than one LUN */ | ||
108 | if (us->protocol == USB_PR_BULK && us->max_lun > 0) | ||
109 | sdev->sdev_bflags |= BLIST_FORCELUN; | ||
110 | |||
105 | return 0; | 111 | return 0; |
106 | } | 112 | } |
107 | 113 | ||
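
The slave_alloc() change above passes a hint to the SCSI layer when a bulk-only device reports more than one LUN, which is also why the Kconfig help now mentions SCSI_MULTI_LUN. A standalone model of that decision; the protocol constant and flag value here are illustrative, not the kernel's:

#include <stdio.h>

#define PROTO_BULK      0x50            /* bulk-only transport, illustrative value */
#define HINT_FORCELUN   0x1             /* stands in for BLIST_FORCELUN */

struct us {
	int protocol;
	unsigned int max_lun;
};

static unsigned int lun_hints(const struct us *us)
{
	if (us->protocol == PROTO_BULK && us->max_lun > 0)
		return HINT_FORCELUN;   /* ask the SCSI layer to scan every LUN */
	return 0;
}

int main(void)
{
	struct us card_reader = { .protocol = PROTO_BULK, .max_lun = 3 };

	printf("hints = %#x\n", lun_hints(&card_reader));
	return 0;
}
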
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 65a6a75066a8..82e8ed0324e3 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h | |||
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, | |||
31 | "Cypress ISD-300LP", | 31 | "Cypress ISD-300LP", |
32 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), | 32 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
33 | 33 | ||
34 | UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, | 34 | UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160, |
35 | "Super Top", | 35 | "Super Top", |
36 | "USB 2.0 SATA BRIDGE", | 36 | "USB 2.0 SATA BRIDGE", |
37 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), | 37 | USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ad06255c2ade..adbeb255616a 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, | |||
1455 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1455 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1456 | US_FL_FIX_CAPACITY ), | 1456 | US_FL_FIX_CAPACITY ), |
1457 | 1457 | ||
1458 | /* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */ | ||
1459 | UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201, | ||
1460 | "Research In Motion", | ||
1461 | "BlackBerry Bold 9000", | ||
1462 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
1463 | US_FL_MAX_SECTORS_64 ), | ||
1464 | |||
1458 | /* Reported by Michael Stattmann <michael@stattmann.com> */ | 1465 | /* Reported by Michael Stattmann <michael@stattmann.com> */ |
1459 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, | 1466 | UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, |
1460 | "Sony Ericsson", | 1467 | "Sony Ericsson", |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 22262a3a0e2d..dade5b7699bc 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -364,7 +364,7 @@ config FB_SA1100 | |||
364 | 364 | ||
365 | config FB_IMX | 365 | config FB_IMX |
366 | tristate "Freescale i.MX1/21/25/27 LCD support" | 366 | tristate "Freescale i.MX1/21/25/27 LCD support" |
367 | depends on FB && IMX_HAVE_PLATFORM_IMX_FB | 367 | depends on FB && ARCH_MXC |
368 | select FB_CFB_FILLRECT | 368 | select FB_CFB_FILLRECT |
369 | select FB_CFB_COPYAREA | 369 | select FB_CFB_COPYAREA |
370 | select FB_CFB_IMAGEBLIT | 370 | select FB_CFB_IMAGEBLIT |
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig index 1129d0e9e640..75c8a8e7efc0 100644 --- a/drivers/video/exynos/Kconfig +++ b/drivers/video/exynos/Kconfig | |||
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI | |||
22 | 22 | ||
23 | config EXYNOS_LCD_S6E8AX0 | 23 | config EXYNOS_LCD_S6E8AX0 |
24 | bool "S6E8AX0 MIPI AMOLED LCD Driver" | 24 | bool "S6E8AX0 MIPI AMOLED LCD Driver" |
25 | depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE) | 25 | depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE |
26 | depends on (LCD_CLASS_DEVICE = y) | ||
26 | default n | 27 | default n |
27 | help | 28 | help |
28 | If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its | 29 | If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its |
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index bbeb8dd7f108..77d6221618f4 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c | |||
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk, | |||
2160 | *five_taps = false; | 2160 | *five_taps = false; |
2161 | 2161 | ||
2162 | do { | 2162 | do { |
2163 | in_height = DIV_ROUND_UP(height, *decim_y); | 2163 | in_height = height / *decim_y; |
2164 | in_width = DIV_ROUND_UP(width, *decim_x); | 2164 | in_width = width / *decim_x; |
2165 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, | 2165 | *core_clk = dispc.feat->calc_core_clk(pclk, in_width, |
2166 | in_height, out_width, out_height, mem_to_mem); | 2166 | in_height, out_width, out_height, mem_to_mem); |
2167 | error = (in_width > maxsinglelinewidth || !*core_clk || | 2167 | error = (in_width > maxsinglelinewidth || !*core_clk || |
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk, | |||
2199 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); | 2199 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); |
2200 | 2200 | ||
2201 | do { | 2201 | do { |
2202 | in_height = DIV_ROUND_UP(height, *decim_y); | 2202 | in_height = height / *decim_y; |
2203 | in_width = DIV_ROUND_UP(width, *decim_x); | 2203 | in_width = width / *decim_x; |
2204 | *five_taps = in_height > out_height; | 2204 | *five_taps = in_height > out_height; |
2205 | 2205 | ||
2206 | if (in_width > maxsinglelinewidth) | 2206 | if (in_width > maxsinglelinewidth) |
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, | |||
2268 | { | 2268 | { |
2269 | u16 in_width, in_width_max; | 2269 | u16 in_width, in_width_max; |
2270 | int decim_x_min = *decim_x; | 2270 | int decim_x_min = *decim_x; |
2271 | u16 in_height = DIV_ROUND_UP(height, *decim_y); | 2271 | u16 in_height = height / *decim_y; |
2272 | const int maxsinglelinewidth = | 2272 | const int maxsinglelinewidth = |
2273 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); | 2273 | dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); |
2274 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); | 2274 | const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); |
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk, | |||
2287 | return -EINVAL; | 2287 | return -EINVAL; |
2288 | 2288 | ||
2289 | do { | 2289 | do { |
2290 | in_width = DIV_ROUND_UP(width, *decim_x); | 2290 | in_width = width / *decim_x; |
2291 | } while (*decim_x <= *x_predecim && | 2291 | } while (*decim_x <= *x_predecim && |
2292 | in_width > maxsinglelinewidth && ++*decim_x); | 2292 | in_width > maxsinglelinewidth && ++*decim_x); |
2293 | 2293 | ||
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane, | |||
2466 | if (r) | 2466 | if (r) |
2467 | return r; | 2467 | return r; |
2468 | 2468 | ||
2469 | in_width = DIV_ROUND_UP(in_width, x_predecim); | 2469 | in_width = in_width / x_predecim; |
2470 | in_height = DIV_ROUND_UP(in_height, y_predecim); | 2470 | in_height = in_height / y_predecim; |
2471 | 2471 | ||
2472 | if (color_mode == OMAP_DSS_COLOR_YUV2 || | 2472 | if (color_mode == OMAP_DSS_COLOR_YUV2 || |
2473 | color_mode == OMAP_DSS_COLOR_UYVY || | 2473 | color_mode == OMAP_DSS_COLOR_UYVY || |
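
The hunks above change the predecimated width and height from DIV_ROUND_UP() to plain truncating division. A quick standalone comparison of the two roundings for a few decimation factors, with DIV_ROUND_UP redefined locally:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1283, decim;

	for (decim = 1; decim <= 4; decim++)
		printf("decim %u: round-up %u, truncating %u\n", decim,
		       DIV_ROUND_UP(width, decim), width / decim);
	return 0;
}
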
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 7411f2674e16..23ef21ffc2c4 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c | |||
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx { | |||
117 | /* outputs */ | 117 | /* outputs */ |
118 | 118 | ||
119 | struct dsi_clock_info dsi_cinfo; | 119 | struct dsi_clock_info dsi_cinfo; |
120 | unsigned long long fck; | 120 | unsigned long fck; |
121 | struct dispc_clock_info dispc_cinfo; | 121 | struct dispc_clock_info dispc_cinfo; |
122 | }; | 122 | }; |
123 | 123 | ||
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index efb9ee9e3c96..ba806c9e7f54 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c | |||
@@ -46,7 +46,7 @@ static struct { | |||
46 | struct sdi_clk_calc_ctx { | 46 | struct sdi_clk_calc_ctx { |
47 | unsigned long pck_min, pck_max; | 47 | unsigned long pck_min, pck_max; |
48 | 48 | ||
49 | unsigned long long fck; | 49 | unsigned long fck; |
50 | struct dispc_clock_info dispc_cinfo; | 50 | struct dispc_clock_info dispc_cinfo; |
51 | }; | 51 | }; |
52 | 52 | ||
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index a06edbfa95ca..1b5d48c578e1 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c | |||
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image, | |||
884 | if (done == count) | 884 | if (done == count) |
885 | goto out; | 885 | goto out; |
886 | } | 886 | } |
887 | if ((uintptr_t)addr & 0x2) { | 887 | if ((uintptr_t)(addr + done) & 0x2) { |
888 | if ((count - done) < 2) { | 888 | if ((count - done) < 2) { |
889 | *(u8 *)(buf + done) = ioread8(addr + done); | 889 | *(u8 *)(buf + done) = ioread8(addr + done); |
890 | done += 1; | 890 | done += 1; |
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image, | |||
938 | if (done == count) | 938 | if (done == count) |
939 | goto out; | 939 | goto out; |
940 | } | 940 | } |
941 | if ((uintptr_t)addr & 0x2) { | 941 | if ((uintptr_t)(addr + done) & 0x2) { |
942 | if ((count - done) < 2) { | 942 | if ((count - done) < 2) { |
943 | iowrite8(*(u8 *)(buf + done), addr + done); | 943 | iowrite8(*(u8 *)(buf + done), addr + done); |
944 | done += 1; | 944 | done += 1; |
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 16830d8b777c..9911cd5fddb5 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c | |||
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, | |||
1289 | if (done == count) | 1289 | if (done == count) |
1290 | goto out; | 1290 | goto out; |
1291 | } | 1291 | } |
1292 | if ((uintptr_t)addr & 0x2) { | 1292 | if ((uintptr_t)(addr + done) & 0x2) { |
1293 | if ((count - done) < 2) { | 1293 | if ((count - done) < 2) { |
1294 | *(u8 *)(buf + done) = ioread8(addr + done); | 1294 | *(u8 *)(buf + done) = ioread8(addr + done); |
1295 | done += 1; | 1295 | done += 1; |
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, | |||
1371 | if (done == count) | 1371 | if (done == count) |
1372 | goto out; | 1372 | goto out; |
1373 | } | 1373 | } |
1374 | if ((uintptr_t)addr & 0x2) { | 1374 | if ((uintptr_t)(addr + done) & 0x2) { |
1375 | if ((count - done) < 2) { | 1375 | if ((count - done) < 2) { |
1376 | iowrite8(*(u8 *)(buf + done), addr + done); | 1376 | iowrite8(*(u8 *)(buf + done), addr + done); |
1377 | done += 1; | 1377 | done += 1; |
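
The four hunks above fix the 2-byte alignment test to look at the current position (addr + done), since a leading single-byte transfer may already have advanced done. A standalone sketch of that alignment-splitting copy, modeled with plain memory instead of the ioread/iowrite accessors and simplified relative to the bridge drivers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t copy_from(const uint8_t *addr, uint8_t *buf, size_t count)
{
	size_t done = 0;

	if (((uintptr_t)addr & 0x1) && count) {                 /* leading byte */
		buf[done] = addr[done];
		done += 1;
	}
	if (((uintptr_t)(addr + done) & 0x2) && count - done >= 2) {
		memcpy(buf + done, addr + done, 2);             /* leading halfword */
		done += 2;
	}
	while (count - done >= 4) {                             /* aligned 32-bit words */
		memcpy(buf + done, addr + done, 4);
		done += 4;
	}
	while (done < count) {                                  /* trailing bytes */
		buf[done] = addr[done];
		done++;
	}
	return done;
}

int main(void)
{
	uint8_t src[17] = "0123456789abcdef";
	uint8_t dst[16];

	printf("copied %zu bytes\n", copy_from(src + 1, dst, 11));
	return 0;
}

With the old test, a buffer that became halfword-aligned only after the first byte was copied would fall through to the 32-bit path at an unaligned offset.
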
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index d75c811bfa56..45e00afa7f2d 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o | |||
16 | dom0-$(CONFIG_X86) += pcpu.o | 16 | dom0-$(CONFIG_X86) += pcpu.o |
17 | obj-$(CONFIG_XEN_DOM0) += $(dom0-y) | 17 | obj-$(CONFIG_XEN_DOM0) += $(dom0-y) |
18 | obj-$(CONFIG_BLOCK) += biomerge.o | 18 | obj-$(CONFIG_BLOCK) += biomerge.o |
19 | obj-$(CONFIG_XEN_XENCOMM) += xencomm.o | ||
20 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o | 19 | obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o |
21 | obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o | 20 | obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o |
22 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o | 21 | obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 4672e003c0ad..f4a9e3311297 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
862 | irq = ret; | 862 | irq = ret; |
863 | goto out; | 863 | goto out; |
864 | } | 864 | } |
865 | /* New interdomain events are bound to VCPU 0. */ | ||
866 | bind_evtchn_to_cpu(evtchn, 0); | ||
865 | } else { | 867 | } else { |
866 | struct irq_info *info = info_for_irq(irq); | 868 | struct irq_info *info = info_for_irq(irq); |
867 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); | 869 | WARN_ON(info == NULL || info->type != IRQT_EVTCHN); |
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c deleted file mode 100644 index 4793fc594549..000000000000 --- a/drivers/xen/xencomm.c +++ /dev/null | |||
@@ -1,219 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
15 | * | ||
16 | * Copyright (C) IBM Corp. 2006 | ||
17 | * | ||
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
22 | |||
23 | #include <linux/mm.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <xen/xencomm.h> | ||
27 | #include <xen/interface/xen.h> | ||
28 | #include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */ | ||
29 | |||
30 | static int xencomm_init(struct xencomm_desc *desc, | ||
31 | void *buffer, unsigned long bytes) | ||
32 | { | ||
33 | unsigned long recorded = 0; | ||
34 | int i = 0; | ||
35 | |||
36 | while ((recorded < bytes) && (i < desc->nr_addrs)) { | ||
37 | unsigned long vaddr = (unsigned long)buffer + recorded; | ||
38 | unsigned long paddr; | ||
39 | int offset; | ||
40 | int chunksz; | ||
41 | |||
42 | offset = vaddr % PAGE_SIZE; /* handle partial pages */ | ||
43 | chunksz = min(PAGE_SIZE - offset, bytes - recorded); | ||
44 | |||
45 | paddr = xencomm_vtop(vaddr); | ||
46 | if (paddr == ~0UL) { | ||
47 | printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n", | ||
48 | __func__, vaddr); | ||
49 | return -EINVAL; | ||
50 | } | ||
51 | |||
52 | desc->address[i++] = paddr; | ||
53 | recorded += chunksz; | ||
54 | } | ||
55 | |||
56 | if (recorded < bytes) { | ||
57 | printk(KERN_DEBUG | ||
58 | "%s: could only translate %ld of %ld bytes\n", | ||
59 | __func__, recorded, bytes); | ||
60 | return -ENOSPC; | ||
61 | } | ||
62 | |||
63 | /* mark remaining addresses invalid (just for safety) */ | ||
64 | while (i < desc->nr_addrs) | ||
65 | desc->address[i++] = XENCOMM_INVALID; | ||
66 | |||
67 | desc->magic = XENCOMM_MAGIC; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask, | ||
73 | void *buffer, unsigned long bytes) | ||
74 | { | ||
75 | struct xencomm_desc *desc; | ||
76 | unsigned long buffer_ulong = (unsigned long)buffer; | ||
77 | unsigned long start = buffer_ulong & PAGE_MASK; | ||
78 | unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK; | ||
79 | unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT; | ||
80 | unsigned long size = sizeof(*desc) + | ||
81 | sizeof(desc->address[0]) * nr_addrs; | ||
82 | |||
83 | /* | ||
84 | * slab allocator returns at least sizeof(void*) aligned pointer. | ||
85 | * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might | ||
86 | * cross page boundary. | ||
87 | */ | ||
88 | if (sizeof(*desc) > sizeof(void *)) { | ||
89 | unsigned long order = get_order(size); | ||
90 | desc = (struct xencomm_desc *)__get_free_pages(gfp_mask, | ||
91 | order); | ||
92 | if (desc == NULL) | ||
93 | return NULL; | ||
94 | |||
95 | desc->nr_addrs = | ||
96 | ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) / | ||
97 | sizeof(*desc->address); | ||
98 | } else { | ||
99 | desc = kmalloc(size, gfp_mask); | ||
100 | if (desc == NULL) | ||
101 | return NULL; | ||
102 | |||
103 | desc->nr_addrs = nr_addrs; | ||
104 | } | ||
105 | return desc; | ||
106 | } | ||
107 | |||
108 | void xencomm_free(struct xencomm_handle *desc) | ||
109 | { | ||
110 | if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) { | ||
111 | struct xencomm_desc *desc__ = (struct xencomm_desc *)desc; | ||
112 | if (sizeof(*desc__) > sizeof(void *)) { | ||
113 | unsigned long size = sizeof(*desc__) + | ||
114 | sizeof(desc__->address[0]) * desc__->nr_addrs; | ||
115 | unsigned long order = get_order(size); | ||
116 | free_pages((unsigned long)__va(desc), order); | ||
117 | } else | ||
118 | kfree(__va(desc)); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int xencomm_create(void *buffer, unsigned long bytes, | ||
123 | struct xencomm_desc **ret, gfp_t gfp_mask) | ||
124 | { | ||
125 | struct xencomm_desc *desc; | ||
126 | int rc; | ||
127 | |||
128 | pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes); | ||
129 | |||
130 | if (bytes == 0) { | ||
131 | /* don't create a descriptor; Xen recognizes NULL. */ | ||
132 | BUG_ON(buffer != NULL); | ||
133 | *ret = NULL; | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | BUG_ON(buffer == NULL); /* 'bytes' is non-zero */ | ||
138 | |||
139 | desc = xencomm_alloc(gfp_mask, buffer, bytes); | ||
140 | if (!desc) { | ||
141 | printk(KERN_DEBUG "%s failure\n", "xencomm_alloc"); | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | |||
145 | rc = xencomm_init(desc, buffer, bytes); | ||
146 | if (rc) { | ||
147 | printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc); | ||
148 | xencomm_free((struct xencomm_handle *)__pa(desc)); | ||
149 | return rc; | ||
150 | } | ||
151 | |||
152 | *ret = desc; | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static struct xencomm_handle *xencomm_create_inline(void *ptr) | ||
157 | { | ||
158 | unsigned long paddr; | ||
159 | |||
160 | BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr)); | ||
161 | |||
162 | paddr = (unsigned long)xencomm_pa(ptr); | ||
163 | BUG_ON(paddr & XENCOMM_INLINE_FLAG); | ||
164 | return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG); | ||
165 | } | ||
166 | |||
167 | /* "mini" routine, for stack-based communications: */ | ||
168 | static int xencomm_create_mini(void *buffer, | ||
169 | unsigned long bytes, struct xencomm_mini *xc_desc, | ||
170 | struct xencomm_desc **ret) | ||
171 | { | ||
172 | int rc = 0; | ||
173 | struct xencomm_desc *desc; | ||
174 | BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0); | ||
175 | |||
176 | desc = (void *)xc_desc; | ||
177 | |||
178 | desc->nr_addrs = XENCOMM_MINI_ADDRS; | ||
179 | |||
180 | rc = xencomm_init(desc, buffer, bytes); | ||
181 | if (!rc) | ||
182 | *ret = desc; | ||
183 | |||
184 | return rc; | ||
185 | } | ||
186 | |||
187 | struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes) | ||
188 | { | ||
189 | int rc; | ||
190 | struct xencomm_desc *desc; | ||
191 | |||
192 | if (xencomm_is_phys_contiguous((unsigned long)ptr)) | ||
193 | return xencomm_create_inline(ptr); | ||
194 | |||
195 | rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL); | ||
196 | |||
197 | if (rc || desc == NULL) | ||
198 | return NULL; | ||
199 | |||
200 | return xencomm_pa(desc); | ||
201 | } | ||
202 | |||
203 | struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes, | ||
204 | struct xencomm_mini *xc_desc) | ||
205 | { | ||
206 | int rc; | ||
207 | struct xencomm_desc *desc = NULL; | ||
208 | |||
209 | if (xencomm_is_phys_contiguous((unsigned long)ptr)) | ||
210 | return xencomm_create_inline(ptr); | ||
211 | |||
212 | rc = xencomm_create_mini(ptr, bytes, xc_desc, | ||
213 | &desc); | ||
214 | |||
215 | if (rc) | ||
216 | return NULL; | ||
217 | |||
218 | return xencomm_pa(desc); | ||
219 | } | ||