diff options
author | David S. Miller <davem@davemloft.net> | 2014-12-10 15:48:20 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-12-10 15:48:20 -0500 |
commit | 22f10923dd39141785273e423b9acf347297e15f (patch) | |
tree | cc1f19103817433a426b3e329d6326f5e9d8e8f7 | |
parent | 785c20a08bead1e58ad53f2dc324782da7a0c9ea (diff) | |
parent | 69204cf7eb9c5a72067ce6922d4699378251d053 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/renesas/sh_eth.c
Overlapping changes in both conflict cases.
Signed-off-by: David S. Miller <davem@davemloft.net>
77 files changed, 540 insertions, 330 deletions
diff --git a/Documentation/Changes b/Documentation/Changes index 1de131bb49fb..74bdda9272a4 100644 --- a/Documentation/Changes +++ b/Documentation/Changes | |||
@@ -383,7 +383,7 @@ o <http://www.iptables.org/downloads.html> | |||
383 | 383 | ||
384 | Ip-route2 | 384 | Ip-route2 |
385 | --------- | 385 | --------- |
386 | o <ftp://ftp.tux.org/pub/net/ip-routing/iproute2-2.2.4-now-ss991023.tar.gz> | 386 | o <https://www.kernel.org/pub/linux/utils/net/iproute2/> |
387 | 387 | ||
388 | OProfile | 388 | OProfile |
389 | -------- | 389 | -------- |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 18 | 2 | PATCHLEVEL = 18 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Diseased Newt | 5 | NAME = Diseased Newt |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9b55143d19db..9fd6834a2172 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c | |||
@@ -426,7 +426,7 @@ static inline void emit_mod(unsigned int dst, unsigned int src, | |||
426 | u32 *p = &ctx->target[ctx->idx]; | 426 | u32 *p = &ctx->target[ctx->idx]; |
427 | uasm_i_divu(&p, dst, src); | 427 | uasm_i_divu(&p, dst, src); |
428 | p = &ctx->target[ctx->idx + 1]; | 428 | p = &ctx->target[ctx->idx + 1]; |
429 | uasm_i_mflo(&p, dst); | 429 | uasm_i_mfhi(&p, dst); |
430 | } | 430 | } |
431 | ctx->idx += 2; /* 2 insts */ | 431 | ctx->idx += 2; /* 2 insts */ |
432 | } | 432 | } |
@@ -971,7 +971,7 @@ load_ind: | |||
971 | break; | 971 | break; |
972 | case BPF_ALU | BPF_MOD | BPF_K: | 972 | case BPF_ALU | BPF_MOD | BPF_K: |
973 | /* A %= k */ | 973 | /* A %= k */ |
974 | if (k == 1 || optimize_div(&k)) { | 974 | if (k == 1) { |
975 | ctx->flags |= SEEN_A; | 975 | ctx->flags |= SEEN_A; |
976 | emit_jit_reg_move(r_A, r_zero, ctx); | 976 | emit_jit_reg_move(r_A, r_zero, ctx); |
977 | } else { | 977 | } else { |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index dd1c24ceda50..3f51cf4e8f02 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -54,12 +54,8 @@ void s390_handle_mcck(void) | |||
54 | */ | 54 | */ |
55 | local_irq_save(flags); | 55 | local_irq_save(flags); |
56 | local_mcck_disable(); | 56 | local_mcck_disable(); |
57 | /* | 57 | mcck = *this_cpu_ptr(&cpu_mcck); |
58 | * Ummm... Does this make sense at all? Copying the percpu struct | 58 | memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck)); |
59 | * and then zapping it one statement later? | ||
60 | */ | ||
61 | memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck)); | ||
62 | memset(&mcck, 0, sizeof(struct mcck_struct)); | ||
63 | clear_cpu_flag(CIF_MCCK_PENDING); | 59 | clear_cpu_flag(CIF_MCCK_PENDING); |
64 | local_mcck_enable(); | 60 | local_mcck_enable(); |
65 | local_irq_restore(flags); | 61 | local_irq_restore(flags); |
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 626e01377a01..987514396c1e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
@@ -178,7 +178,7 @@ static void jit_fill_hole(void *area, unsigned int size) | |||
178 | } | 178 | } |
179 | 179 | ||
180 | struct jit_context { | 180 | struct jit_context { |
181 | unsigned int cleanup_addr; /* epilogue code offset */ | 181 | int cleanup_addr; /* epilogue code offset */ |
182 | bool seen_ld_abs; | 182 | bool seen_ld_abs; |
183 | }; | 183 | }; |
184 | 184 | ||
@@ -192,6 +192,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, | |||
192 | struct bpf_insn *insn = bpf_prog->insnsi; | 192 | struct bpf_insn *insn = bpf_prog->insnsi; |
193 | int insn_cnt = bpf_prog->len; | 193 | int insn_cnt = bpf_prog->len; |
194 | bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); | 194 | bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0); |
195 | bool seen_exit = false; | ||
195 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; | 196 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; |
196 | int i; | 197 | int i; |
197 | int proglen = 0; | 198 | int proglen = 0; |
@@ -854,10 +855,11 @@ common_load: | |||
854 | goto common_load; | 855 | goto common_load; |
855 | 856 | ||
856 | case BPF_JMP | BPF_EXIT: | 857 | case BPF_JMP | BPF_EXIT: |
857 | if (i != insn_cnt - 1) { | 858 | if (seen_exit) { |
858 | jmp_offset = ctx->cleanup_addr - addrs[i]; | 859 | jmp_offset = ctx->cleanup_addr - addrs[i]; |
859 | goto emit_jmp; | 860 | goto emit_jmp; |
860 | } | 861 | } |
862 | seen_exit = true; | ||
861 | /* update cleanup_addr */ | 863 | /* update cleanup_addr */ |
862 | ctx->cleanup_addr = proglen; | 864 | ctx->cleanup_addr = proglen; |
863 | /* mov rbx, qword ptr [rbp-X] */ | 865 | /* mov rbx, qword ptr [rbp-X] */ |
diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 0984232e429f..5cbd5d9ea61d 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c | |||
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio, | |||
216 | { | 216 | { |
217 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); | 217 | struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); |
218 | struct blk_integrity_iter iter; | 218 | struct blk_integrity_iter iter; |
219 | struct bio_vec *bv; | 219 | struct bvec_iter bviter; |
220 | struct bio_vec bv; | ||
220 | struct bio_integrity_payload *bip = bio_integrity(bio); | 221 | struct bio_integrity_payload *bip = bio_integrity(bio); |
221 | unsigned int i, ret = 0; | 222 | unsigned int ret = 0; |
222 | void *prot_buf = page_address(bip->bip_vec->bv_page) + | 223 | void *prot_buf = page_address(bip->bip_vec->bv_page) + |
223 | bip->bip_vec->bv_offset; | 224 | bip->bip_vec->bv_offset; |
224 | 225 | ||
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio, | |||
227 | iter.seed = bip_get_seed(bip); | 228 | iter.seed = bip_get_seed(bip); |
228 | iter.prot_buf = prot_buf; | 229 | iter.prot_buf = prot_buf; |
229 | 230 | ||
230 | bio_for_each_segment_all(bv, bio, i) { | 231 | bio_for_each_segment(bv, bio, bviter) { |
231 | void *kaddr = kmap_atomic(bv->bv_page); | 232 | void *kaddr = kmap_atomic(bv.bv_page); |
232 | 233 | ||
233 | iter.data_buf = kaddr + bv->bv_offset; | 234 | iter.data_buf = kaddr + bv.bv_offset; |
234 | iter.data_size = bv->bv_len; | 235 | iter.data_size = bv.bv_len; |
235 | 236 | ||
236 | ret = proc_fn(&iter); | 237 | ret = proc_fn(&iter); |
237 | if (ret) { | 238 | if (ret) { |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f0a1a56406eb..8bcdb981d540 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -9408,6 +9408,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) | |||
9408 | struct drm_device *dev = crtc->base.dev; | 9408 | struct drm_device *dev = crtc->base.dev; |
9409 | struct drm_i915_private *dev_priv = dev->dev_private; | 9409 | struct drm_i915_private *dev_priv = dev->dev_private; |
9410 | 9410 | ||
9411 | if (i915_reset_in_progress(&dev_priv->gpu_error) || | ||
9412 | crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) | ||
9413 | return true; | ||
9414 | |||
9411 | /* | 9415 | /* |
9412 | * The relevant registers doen't exist on pre-ctg. | 9416 | * The relevant registers doen't exist on pre-ctg. |
9413 | * As the flip done interrupt doesn't trigger for mmio | 9417 | * As the flip done interrupt doesn't trigger for mmio |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ad45bfff3fe..4bcd91757321 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4450,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) | |||
4450 | * vdd might still be enabled do to the delayed vdd off. | 4450 | * vdd might still be enabled do to the delayed vdd off. |
4451 | * Make sure vdd is actually turned off here. | 4451 | * Make sure vdd is actually turned off here. |
4452 | */ | 4452 | */ |
4453 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | ||
4453 | pps_lock(intel_dp); | 4454 | pps_lock(intel_dp); |
4454 | edp_panel_vdd_off_sync(intel_dp); | 4455 | edp_panel_vdd_off_sync(intel_dp); |
4455 | pps_unlock(intel_dp); | 4456 | pps_unlock(intel_dp); |
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index cd05677ad4b7..72a40f95d048 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | |||
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device) | |||
218 | device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; | 218 | device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; |
219 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; | 219 | device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; |
220 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; | 220 | device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; |
221 | device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; | ||
222 | device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; | 221 | device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; |
223 | device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; | 222 | device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; |
224 | break; | 223 | break; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index 5ae6a43893b5..1931057f9962 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | |||
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) | |||
551 | } | 551 | } |
552 | 552 | ||
553 | if (status & 0x40000000) { | 553 | if (status & 0x40000000) { |
554 | nouveau_fifo_uevent(&priv->base); | ||
555 | nv_wr32(priv, 0x002100, 0x40000000); | 554 | nv_wr32(priv, 0x002100, 0x40000000); |
555 | nouveau_fifo_uevent(&priv->base); | ||
556 | status &= ~0x40000000; | 556 | status &= ~0x40000000; |
557 | } | 557 | } |
558 | } | 558 | } |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 1fe1f8fbda0c..074d434c3077 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | |||
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) | |||
740 | u32 inte = nv_rd32(priv, 0x002628); | 740 | u32 inte = nv_rd32(priv, 0x002628); |
741 | u32 unkn; | 741 | u32 unkn; |
742 | 742 | ||
743 | nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); | ||
744 | |||
743 | for (unkn = 0; unkn < 8; unkn++) { | 745 | for (unkn = 0; unkn < 8; unkn++) { |
744 | u32 ints = (intr >> (unkn * 0x04)) & inte; | 746 | u32 ints = (intr >> (unkn * 0x04)) & inte; |
745 | if (ints & 0x1) { | 747 | if (ints & 0x1) { |
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) | |||
751 | nv_mask(priv, 0x002628, ints, 0); | 753 | nv_mask(priv, 0x002628, ints, 0); |
752 | } | 754 | } |
753 | } | 755 | } |
754 | |||
755 | nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); | ||
756 | } | 756 | } |
757 | 757 | ||
758 | static void | 758 | static void |
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index d2f0fd39c145..f8734eb74eaa 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | |||
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) | |||
952 | } | 952 | } |
953 | 953 | ||
954 | if (stat & 0x80000000) { | 954 | if (stat & 0x80000000) { |
955 | nve0_fifo_intr_engine(priv); | ||
956 | nv_wr32(priv, 0x002100, 0x80000000); | 955 | nv_wr32(priv, 0x002100, 0x80000000); |
956 | nve0_fifo_intr_engine(priv); | ||
957 | stat &= ~0x80000000; | 957 | stat &= ~0x80000000; |
958 | } | 958 | } |
959 | 959 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 57238076049f..62b97c4eef8d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev) | |||
629 | 629 | ||
630 | pci_save_state(pdev); | 630 | pci_save_state(pdev); |
631 | pci_disable_device(pdev); | 631 | pci_disable_device(pdev); |
632 | pci_ignore_hotplug(pdev); | ||
633 | pci_set_power_state(pdev, PCI_D3hot); | 632 | pci_set_power_state(pdev, PCI_D3hot); |
634 | return 0; | 633 | return 0; |
635 | } | 634 | } |
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) | |||
933 | ret = nouveau_do_suspend(drm_dev, true); | 932 | ret = nouveau_do_suspend(drm_dev, true); |
934 | pci_save_state(pdev); | 933 | pci_save_state(pdev); |
935 | pci_disable_device(pdev); | 934 | pci_disable_device(pdev); |
935 | pci_ignore_hotplug(pdev); | ||
936 | pci_set_power_state(pdev, PCI_D3cold); | 936 | pci_set_power_state(pdev, PCI_D3cold); |
937 | drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; | 937 | drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; |
938 | return ret; | 938 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 515cd9aebb99..f32a434724e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence) | |||
52 | return container_of(fence->base.lock, struct nouveau_fence_chan, lock); | 52 | return container_of(fence->base.lock, struct nouveau_fence_chan, lock); |
53 | } | 53 | } |
54 | 54 | ||
55 | static void | 55 | static int |
56 | nouveau_fence_signal(struct nouveau_fence *fence) | 56 | nouveau_fence_signal(struct nouveau_fence *fence) |
57 | { | 57 | { |
58 | int drop = 0; | ||
59 | |||
58 | fence_signal_locked(&fence->base); | 60 | fence_signal_locked(&fence->base); |
59 | list_del(&fence->head); | 61 | list_del(&fence->head); |
62 | rcu_assign_pointer(fence->channel, NULL); | ||
60 | 63 | ||
61 | if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { | 64 | if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { |
62 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 65 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
63 | 66 | ||
64 | if (!--fctx->notify_ref) | 67 | if (!--fctx->notify_ref) |
65 | nvif_notify_put(&fctx->notify); | 68 | drop = 1; |
66 | } | 69 | } |
67 | 70 | ||
68 | fence_put(&fence->base); | 71 | fence_put(&fence->base); |
72 | return drop; | ||
69 | } | 73 | } |
70 | 74 | ||
71 | static struct nouveau_fence * | 75 | static struct nouveau_fence * |
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx) | |||
88 | { | 92 | { |
89 | struct nouveau_fence *fence; | 93 | struct nouveau_fence *fence; |
90 | 94 | ||
91 | nvif_notify_fini(&fctx->notify); | ||
92 | |||
93 | spin_lock_irq(&fctx->lock); | 95 | spin_lock_irq(&fctx->lock); |
94 | while (!list_empty(&fctx->pending)) { | 96 | while (!list_empty(&fctx->pending)) { |
95 | fence = list_entry(fctx->pending.next, typeof(*fence), head); | 97 | fence = list_entry(fctx->pending.next, typeof(*fence), head); |
96 | 98 | ||
97 | nouveau_fence_signal(fence); | 99 | if (nouveau_fence_signal(fence)) |
98 | fence->channel = NULL; | 100 | nvif_notify_put(&fctx->notify); |
99 | } | 101 | } |
100 | spin_unlock_irq(&fctx->lock); | 102 | spin_unlock_irq(&fctx->lock); |
103 | |||
104 | nvif_notify_fini(&fctx->notify); | ||
105 | fctx->dead = 1; | ||
106 | |||
107 | /* | ||
108 | * Ensure that all accesses to fence->channel complete before freeing | ||
109 | * the channel. | ||
110 | */ | ||
111 | synchronize_rcu(); | ||
101 | } | 112 | } |
102 | 113 | ||
103 | static void | 114 | static void |
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx) | |||
112 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); | 123 | kref_put(&fctx->fence_ref, nouveau_fence_context_put); |
113 | } | 124 | } |
114 | 125 | ||
115 | static void | 126 | static int |
116 | nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) | 127 | nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) |
117 | { | 128 | { |
118 | struct nouveau_fence *fence; | 129 | struct nouveau_fence *fence; |
119 | 130 | int drop = 0; | |
120 | u32 seq = fctx->read(chan); | 131 | u32 seq = fctx->read(chan); |
121 | 132 | ||
122 | while (!list_empty(&fctx->pending)) { | 133 | while (!list_empty(&fctx->pending)) { |
123 | fence = list_entry(fctx->pending.next, typeof(*fence), head); | 134 | fence = list_entry(fctx->pending.next, typeof(*fence), head); |
124 | 135 | ||
125 | if ((int)(seq - fence->base.seqno) < 0) | 136 | if ((int)(seq - fence->base.seqno) < 0) |
126 | return; | 137 | break; |
127 | 138 | ||
128 | nouveau_fence_signal(fence); | 139 | drop |= nouveau_fence_signal(fence); |
129 | } | 140 | } |
141 | |||
142 | return drop; | ||
130 | } | 143 | } |
131 | 144 | ||
132 | static int | 145 | static int |
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) | |||
135 | struct nouveau_fence_chan *fctx = | 148 | struct nouveau_fence_chan *fctx = |
136 | container_of(notify, typeof(*fctx), notify); | 149 | container_of(notify, typeof(*fctx), notify); |
137 | unsigned long flags; | 150 | unsigned long flags; |
151 | int ret = NVIF_NOTIFY_KEEP; | ||
138 | 152 | ||
139 | spin_lock_irqsave(&fctx->lock, flags); | 153 | spin_lock_irqsave(&fctx->lock, flags); |
140 | if (!list_empty(&fctx->pending)) { | 154 | if (!list_empty(&fctx->pending)) { |
141 | struct nouveau_fence *fence; | 155 | struct nouveau_fence *fence; |
156 | struct nouveau_channel *chan; | ||
142 | 157 | ||
143 | fence = list_entry(fctx->pending.next, typeof(*fence), head); | 158 | fence = list_entry(fctx->pending.next, typeof(*fence), head); |
144 | nouveau_fence_update(fence->channel, fctx); | 159 | chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); |
160 | if (nouveau_fence_update(fence->channel, fctx)) | ||
161 | ret = NVIF_NOTIFY_DROP; | ||
145 | } | 162 | } |
146 | spin_unlock_irqrestore(&fctx->lock, flags); | 163 | spin_unlock_irqrestore(&fctx->lock, flags); |
147 | 164 | ||
148 | /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */ | 165 | return ret; |
149 | return NVIF_NOTIFY_KEEP; | ||
150 | } | 166 | } |
151 | 167 | ||
152 | void | 168 | void |
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
262 | if (!ret) { | 278 | if (!ret) { |
263 | fence_get(&fence->base); | 279 | fence_get(&fence->base); |
264 | spin_lock_irq(&fctx->lock); | 280 | spin_lock_irq(&fctx->lock); |
265 | nouveau_fence_update(chan, fctx); | 281 | |
282 | if (nouveau_fence_update(chan, fctx)) | ||
283 | nvif_notify_put(&fctx->notify); | ||
284 | |||
266 | list_add_tail(&fence->head, &fctx->pending); | 285 | list_add_tail(&fence->head, &fctx->pending); |
267 | spin_unlock_irq(&fctx->lock); | 286 | spin_unlock_irq(&fctx->lock); |
268 | } | 287 | } |
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence) | |||
276 | if (fence->base.ops == &nouveau_fence_ops_legacy || | 295 | if (fence->base.ops == &nouveau_fence_ops_legacy || |
277 | fence->base.ops == &nouveau_fence_ops_uevent) { | 296 | fence->base.ops == &nouveau_fence_ops_uevent) { |
278 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 297 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
298 | struct nouveau_channel *chan; | ||
279 | unsigned long flags; | 299 | unsigned long flags; |
280 | 300 | ||
281 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) | 301 | if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) |
282 | return true; | 302 | return true; |
283 | 303 | ||
284 | spin_lock_irqsave(&fctx->lock, flags); | 304 | spin_lock_irqsave(&fctx->lock, flags); |
285 | nouveau_fence_update(fence->channel, fctx); | 305 | chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock)); |
306 | if (chan && nouveau_fence_update(chan, fctx)) | ||
307 | nvif_notify_put(&fctx->notify); | ||
286 | spin_unlock_irqrestore(&fctx->lock, flags); | 308 | spin_unlock_irqrestore(&fctx->lock, flags); |
287 | } | 309 | } |
288 | return fence_is_signaled(&fence->base); | 310 | return fence_is_signaled(&fence->base); |
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
387 | 409 | ||
388 | if (fence && (!exclusive || !fobj || !fobj->shared_count)) { | 410 | if (fence && (!exclusive || !fobj || !fobj->shared_count)) { |
389 | struct nouveau_channel *prev = NULL; | 411 | struct nouveau_channel *prev = NULL; |
412 | bool must_wait = true; | ||
390 | 413 | ||
391 | f = nouveau_local_fence(fence, chan->drm); | 414 | f = nouveau_local_fence(fence, chan->drm); |
392 | if (f) | 415 | if (f) { |
393 | prev = f->channel; | 416 | rcu_read_lock(); |
417 | prev = rcu_dereference(f->channel); | ||
418 | if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) | ||
419 | must_wait = false; | ||
420 | rcu_read_unlock(); | ||
421 | } | ||
394 | 422 | ||
395 | if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) | 423 | if (must_wait) |
396 | ret = fence_wait(fence, intr); | 424 | ret = fence_wait(fence, intr); |
397 | 425 | ||
398 | return ret; | 426 | return ret; |
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e | |||
403 | 431 | ||
404 | for (i = 0; i < fobj->shared_count && !ret; ++i) { | 432 | for (i = 0; i < fobj->shared_count && !ret; ++i) { |
405 | struct nouveau_channel *prev = NULL; | 433 | struct nouveau_channel *prev = NULL; |
434 | bool must_wait = true; | ||
406 | 435 | ||
407 | fence = rcu_dereference_protected(fobj->shared[i], | 436 | fence = rcu_dereference_protected(fobj->shared[i], |
408 | reservation_object_held(resv)); | 437 | reservation_object_held(resv)); |
409 | 438 | ||
410 | f = nouveau_local_fence(fence, chan->drm); | 439 | f = nouveau_local_fence(fence, chan->drm); |
411 | if (f) | 440 | if (f) { |
412 | prev = f->channel; | 441 | rcu_read_lock(); |
442 | prev = rcu_dereference(f->channel); | ||
443 | if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) | ||
444 | must_wait = false; | ||
445 | rcu_read_unlock(); | ||
446 | } | ||
413 | 447 | ||
414 | if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) | 448 | if (must_wait) |
415 | ret = fence_wait(fence, intr); | 449 | ret = fence_wait(fence, intr); |
416 | |||
417 | if (ret) | ||
418 | break; | ||
419 | } | 450 | } |
420 | 451 | ||
421 | return ret; | 452 | return ret; |
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f) | |||
463 | struct nouveau_fence *fence = from_fence(f); | 494 | struct nouveau_fence *fence = from_fence(f); |
464 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 495 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
465 | 496 | ||
466 | return fence->channel ? fctx->name : "dead channel"; | 497 | return !fctx->dead ? fctx->name : "dead channel"; |
467 | } | 498 | } |
468 | 499 | ||
469 | /* | 500 | /* |
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f) | |||
476 | { | 507 | { |
477 | struct nouveau_fence *fence = from_fence(f); | 508 | struct nouveau_fence *fence = from_fence(f); |
478 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); | 509 | struct nouveau_fence_chan *fctx = nouveau_fctx(fence); |
479 | struct nouveau_channel *chan = fence->channel; | 510 | struct nouveau_channel *chan; |
511 | bool ret = false; | ||
512 | |||
513 | rcu_read_lock(); | ||
514 | chan = rcu_dereference(fence->channel); | ||
515 | if (chan) | ||
516 | ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0; | ||
517 | rcu_read_unlock(); | ||
480 | 518 | ||
481 | return (int)(fctx->read(chan) - fence->base.seqno) >= 0; | 519 | return ret; |
482 | } | 520 | } |
483 | 521 | ||
484 | static bool nouveau_fence_no_signaling(struct fence *f) | 522 | static bool nouveau_fence_no_signaling(struct fence *f) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 943b0b17b1fc..96e461c6f68f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h | |||
@@ -14,7 +14,7 @@ struct nouveau_fence { | |||
14 | 14 | ||
15 | bool sysmem; | 15 | bool sysmem; |
16 | 16 | ||
17 | struct nouveau_channel *channel; | 17 | struct nouveau_channel __rcu *channel; |
18 | unsigned long timeout; | 18 | unsigned long timeout; |
19 | }; | 19 | }; |
20 | 20 | ||
@@ -47,7 +47,7 @@ struct nouveau_fence_chan { | |||
47 | char name[32]; | 47 | char name[32]; |
48 | 48 | ||
49 | struct nvif_notify notify; | 49 | struct nvif_notify notify; |
50 | int notify_ref; | 50 | int notify_ref, dead; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | struct nouveau_fence_priv { | 53 | struct nouveau_fence_priv { |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 300c4b3d4669..26baa9c05f6c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | if (!radeon_connector->edid) { | 324 | if (!radeon_connector->edid) { |
325 | /* don't fetch the edid from the vbios if ddc fails and runpm is | ||
326 | * enabled so we report disconnected. | ||
327 | */ | ||
328 | if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) | ||
329 | return; | ||
330 | |||
325 | if (rdev->is_atom_bios) { | 331 | if (rdev->is_atom_bios) { |
326 | /* some laptops provide a hardcoded edid in rom for LCDs */ | 332 | /* some laptops provide a hardcoded edid in rom for LCDs */ |
327 | if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || | 333 | if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || |
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, | |||
826 | static enum drm_connector_status | 832 | static enum drm_connector_status |
827 | radeon_lvds_detect(struct drm_connector *connector, bool force) | 833 | radeon_lvds_detect(struct drm_connector *connector, bool force) |
828 | { | 834 | { |
835 | struct drm_device *dev = connector->dev; | ||
836 | struct radeon_device *rdev = dev->dev_private; | ||
829 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 837 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
830 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | 838 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
831 | enum drm_connector_status ret = connector_status_disconnected; | 839 | enum drm_connector_status ret = connector_status_disconnected; |
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) | |||
842 | /* check if panel is valid */ | 850 | /* check if panel is valid */ |
843 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | 851 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
844 | ret = connector_status_connected; | 852 | ret = connector_status_connected; |
845 | 853 | /* don't fetch the edid from the vbios if ddc fails and runpm is | |
854 | * enabled so we report disconnected. | ||
855 | */ | ||
856 | if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) | ||
857 | ret = connector_status_disconnected; | ||
846 | } | 858 | } |
847 | 859 | ||
848 | /* check for edid as well */ | 860 | /* check for edid as well */ |
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1589 | /* check if panel is valid */ | 1601 | /* check if panel is valid */ |
1590 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) | 1602 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
1591 | ret = connector_status_connected; | 1603 | ret = connector_status_connected; |
1604 | /* don't fetch the edid from the vbios if ddc fails and runpm is | ||
1605 | * enabled so we report disconnected. | ||
1606 | */ | ||
1607 | if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) | ||
1608 | ret = connector_status_disconnected; | ||
1592 | } | 1609 | } |
1593 | /* eDP is always DP */ | 1610 | /* eDP is always DP */ |
1594 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | 1611 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a3e7aed7e680..6f377de099f9 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority | |||
251 | 251 | ||
252 | static int radeon_cs_sync_rings(struct radeon_cs_parser *p) | 252 | static int radeon_cs_sync_rings(struct radeon_cs_parser *p) |
253 | { | 253 | { |
254 | int i, r = 0; | 254 | struct radeon_cs_reloc *reloc; |
255 | int r; | ||
255 | 256 | ||
256 | for (i = 0; i < p->nrelocs; i++) { | 257 | list_for_each_entry(reloc, &p->validated, tv.head) { |
257 | struct reservation_object *resv; | 258 | struct reservation_object *resv; |
258 | 259 | ||
259 | if (!p->relocs[i].robj) | 260 | resv = reloc->robj->tbo.resv; |
260 | continue; | ||
261 | |||
262 | resv = p->relocs[i].robj->tbo.resv; | ||
263 | r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, | 261 | r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, |
264 | p->relocs[i].tv.shared); | 262 | reloc->tv.shared); |
265 | |||
266 | if (r) | 263 | if (r) |
267 | break; | 264 | return r; |
268 | } | 265 | } |
269 | return r; | 266 | return 0; |
270 | } | 267 | } |
271 | 268 | ||
272 | /* XXX: note that this is called from the legacy UMS CS ioctl as well */ | 269 | /* XXX: note that this is called from the legacy UMS CS ioctl as well */ |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8309b11e674d..03586763ee86 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, | |||
795 | 795 | ||
796 | /* Get associated drm_crtc: */ | 796 | /* Get associated drm_crtc: */ |
797 | drmcrtc = &rdev->mode_info.crtcs[crtc]->base; | 797 | drmcrtc = &rdev->mode_info.crtcs[crtc]->base; |
798 | if (!drmcrtc) | ||
799 | return -EINVAL; | ||
798 | 800 | ||
799 | /* Helper routine in DRM core does all the work: */ | 801 | /* Helper routine in DRM core does all the work: */ |
800 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, | 802 | return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 99a960a4f302..4c0d786d5c7a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
213 | if (!(rdev->flags & RADEON_IS_PCIE)) | 213 | if (!(rdev->flags & RADEON_IS_PCIE)) |
214 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | 214 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
215 | 215 | ||
216 | #ifdef CONFIG_X86_32 | ||
217 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit | ||
218 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 | ||
219 | */ | ||
220 | bo->flags &= ~RADEON_GEM_GTT_WC; | ||
221 | #endif | ||
222 | |||
216 | radeon_ttm_placement_from_domain(bo, domain); | 223 | radeon_ttm_placement_from_domain(bo, domain); |
217 | /* Kernel allocation are uninterruptible */ | 224 | /* Kernel allocation are uninterruptible */ |
218 | down_read(&rdev->pm.mclk_lock); | 225 | down_read(&rdev->pm.mclk_lock); |
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index edca99dbba23..23628b7bfb8d 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev) | |||
359 | } | 359 | } |
360 | 360 | ||
361 | /* Configure Tx/Rx FIFO threshold levels */ | 361 | /* Configure Tx/Rx FIFO threshold levels */ |
362 | dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL); | 362 | dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL); |
363 | dw_writel(dev, 0, DW_IC_RX_TL); | 363 | dw_writel(dev, 0, DW_IC_RX_TL); |
364 | 364 | ||
365 | /* configure the i2c master */ | 365 | /* configure the i2c master */ |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 26942c159de1..277a2288d4a8 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id) | |||
922 | if (stat & OMAP_I2C_STAT_NACK) { | 922 | if (stat & OMAP_I2C_STAT_NACK) { |
923 | err |= OMAP_I2C_STAT_NACK; | 923 | err |= OMAP_I2C_STAT_NACK; |
924 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); | 924 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); |
925 | break; | ||
926 | } | 925 | } |
927 | 926 | ||
928 | if (stat & OMAP_I2C_STAT_AL) { | 927 | if (stat & OMAP_I2C_STAT_AL) { |
929 | dev_err(dev->dev, "Arbitration lost\n"); | 928 | dev_err(dev->dev, "Arbitration lost\n"); |
930 | err |= OMAP_I2C_STAT_AL; | 929 | err |= OMAP_I2C_STAT_AL; |
931 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); | 930 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); |
932 | break; | ||
933 | } | 931 | } |
934 | 932 | ||
935 | /* | 933 | /* |
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id) | |||
954 | if (dev->fifo_size) | 952 | if (dev->fifo_size) |
955 | num_bytes = dev->buf_len; | 953 | num_bytes = dev->buf_len; |
956 | 954 | ||
957 | omap_i2c_receive_data(dev, num_bytes, true); | 955 | if (dev->errata & I2C_OMAP_ERRATA_I207) { |
958 | |||
959 | if (dev->errata & I2C_OMAP_ERRATA_I207) | ||
960 | i2c_omap_errata_i207(dev, stat); | 956 | i2c_omap_errata_i207(dev, stat); |
957 | num_bytes = (omap_i2c_read_reg(dev, | ||
958 | OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F; | ||
959 | } | ||
961 | 960 | ||
961 | omap_i2c_receive_data(dev, num_bytes, true); | ||
962 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); | 962 | omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); |
963 | continue; | 963 | continue; |
964 | } | 964 | } |
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index bc203485716d..8afa28e4570e 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file) | |||
421 | 421 | ||
422 | err_free_client: | 422 | err_free_client: |
423 | evdev_detach_client(evdev, client); | 423 | evdev_detach_client(evdev, client); |
424 | kfree(client); | 424 | kvfree(client); |
425 | return error; | 425 | return error; |
426 | } | 426 | } |
427 | 427 | ||
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 3e6eebd5be50..7b1124366011 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c | |||
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev, | |||
225 | 225 | ||
226 | bond_option_arp_ip_targets_clear(bond); | 226 | bond_option_arp_ip_targets_clear(bond); |
227 | nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) { | 227 | nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) { |
228 | __be32 target = nla_get_be32(attr); | 228 | __be32 target; |
229 | |||
230 | if (nla_len(attr) < sizeof(target)) | ||
231 | return -EINVAL; | ||
232 | |||
233 | target = nla_get_be32(attr); | ||
229 | 234 | ||
230 | bond_opt_initval(&newval, (__force u64)target); | 235 | bond_opt_initval(&newval, (__force u64)target); |
231 | err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS, | 236 | err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS, |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 925ab8ec9329..4e1659d07979 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -316,7 +316,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) | |||
316 | if (err) { | 316 | if (err) { |
317 | netdev_err(dev->netdev, "getting serial failure: %d\n", err); | 317 | netdev_err(dev->netdev, "getting serial failure: %d\n", err); |
318 | } else if (serial_number) { | 318 | } else if (serial_number) { |
319 | u32 tmp32; | 319 | __le32 tmp32; |
320 | 320 | ||
321 | memcpy(&tmp32, args, 4); | 321 | memcpy(&tmp32, args, 4); |
322 | *serial_number = le32_to_cpu(tmp32); | 322 | *serial_number = le32_to_cpu(tmp32); |
@@ -347,7 +347,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id) | |||
347 | */ | 347 | */ |
348 | static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) | 348 | static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) |
349 | { | 349 | { |
350 | u16 tmp16; | 350 | __le16 tmp16; |
351 | 351 | ||
352 | if ((mc->ptr+2) > mc->end) | 352 | if ((mc->ptr+2) > mc->end) |
353 | return -EINVAL; | 353 | return -EINVAL; |
@@ -371,7 +371,7 @@ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) | |||
371 | { | 371 | { |
372 | /* only 1st packet supplies a word timestamp */ | 372 | /* only 1st packet supplies a word timestamp */ |
373 | if (first_packet) { | 373 | if (first_packet) { |
374 | u16 tmp16; | 374 | __le16 tmp16; |
375 | 375 | ||
376 | if ((mc->ptr + 2) > mc->end) | 376 | if ((mc->ptr + 2) > mc->end) |
377 | return -EINVAL; | 377 | return -EINVAL; |
@@ -614,7 +614,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
614 | return -ENOMEM; | 614 | return -ENOMEM; |
615 | 615 | ||
616 | if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { | 616 | if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { |
617 | u32 tmp32; | 617 | __le32 tmp32; |
618 | 618 | ||
619 | if ((mc->ptr + 4) > mc->end) | 619 | if ((mc->ptr + 4) > mc->end) |
620 | goto decode_failed; | 620 | goto decode_failed; |
@@ -622,9 +622,9 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
622 | memcpy(&tmp32, mc->ptr, 4); | 622 | memcpy(&tmp32, mc->ptr, 4); |
623 | mc->ptr += 4; | 623 | mc->ptr += 4; |
624 | 624 | ||
625 | cf->can_id = le32_to_cpu(tmp32 >> 3) | CAN_EFF_FLAG; | 625 | cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG; |
626 | } else { | 626 | } else { |
627 | u16 tmp16; | 627 | __le16 tmp16; |
628 | 628 | ||
629 | if ((mc->ptr + 2) > mc->end) | 629 | if ((mc->ptr + 2) > mc->end) |
630 | goto decode_failed; | 630 | goto decode_failed; |
@@ -632,7 +632,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
632 | memcpy(&tmp16, mc->ptr, 2); | 632 | memcpy(&tmp16, mc->ptr, 2); |
633 | mc->ptr += 2; | 633 | mc->ptr += 2; |
634 | 634 | ||
635 | cf->can_id = le16_to_cpu(tmp16 >> 5); | 635 | cf->can_id = le16_to_cpu(tmp16) >> 5; |
636 | } | 636 | } |
637 | 637 | ||
638 | cf->can_dlc = get_can_dlc(rec_len); | 638 | cf->can_dlc = get_can_dlc(rec_len); |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 644e6ab8a489..c62f48a1161d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
@@ -735,7 +735,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, | |||
735 | dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); | 735 | dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); |
736 | if (!dev->cmd_buf) { | 736 | if (!dev->cmd_buf) { |
737 | err = -ENOMEM; | 737 | err = -ENOMEM; |
738 | goto lbl_set_intf_data; | 738 | goto lbl_free_candev; |
739 | } | 739 | } |
740 | 740 | ||
741 | dev->udev = usb_dev; | 741 | dev->udev = usb_dev; |
@@ -775,7 +775,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, | |||
775 | err = register_candev(netdev); | 775 | err = register_candev(netdev); |
776 | if (err) { | 776 | if (err) { |
777 | dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); | 777 | dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); |
778 | goto lbl_free_cmd_buf; | 778 | goto lbl_restore_intf_data; |
779 | } | 779 | } |
780 | 780 | ||
781 | if (dev->prev_siblings) | 781 | if (dev->prev_siblings) |
@@ -788,14 +788,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, | |||
788 | if (dev->adapter->dev_init) { | 788 | if (dev->adapter->dev_init) { |
789 | err = dev->adapter->dev_init(dev); | 789 | err = dev->adapter->dev_init(dev); |
790 | if (err) | 790 | if (err) |
791 | goto lbl_free_cmd_buf; | 791 | goto lbl_unregister_candev; |
792 | } | 792 | } |
793 | 793 | ||
794 | /* set bus off */ | 794 | /* set bus off */ |
795 | if (dev->adapter->dev_set_bus) { | 795 | if (dev->adapter->dev_set_bus) { |
796 | err = dev->adapter->dev_set_bus(dev, 0); | 796 | err = dev->adapter->dev_set_bus(dev, 0); |
797 | if (err) | 797 | if (err) |
798 | goto lbl_free_cmd_buf; | 798 | goto lbl_unregister_candev; |
799 | } | 799 | } |
800 | 800 | ||
801 | /* get device number early */ | 801 | /* get device number early */ |
@@ -807,11 +807,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, | |||
807 | 807 | ||
808 | return 0; | 808 | return 0; |
809 | 809 | ||
810 | lbl_free_cmd_buf: | 810 | lbl_unregister_candev: |
811 | kfree(dev->cmd_buf); | 811 | unregister_candev(netdev); |
812 | 812 | ||
813 | lbl_set_intf_data: | 813 | lbl_restore_intf_data: |
814 | usb_set_intfdata(intf, dev->prev_siblings); | 814 | usb_set_intfdata(intf, dev->prev_siblings); |
815 | kfree(dev->cmd_buf); | ||
816 | |||
817 | lbl_free_candev: | ||
815 | free_candev(netdev); | 818 | free_candev(netdev); |
816 | 819 | ||
817 | return err; | 820 | return err; |
@@ -853,6 +856,7 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
853 | const struct usb_device_id *id) | 856 | const struct usb_device_id *id) |
854 | { | 857 | { |
855 | struct usb_device *usb_dev = interface_to_usbdev(intf); | 858 | struct usb_device *usb_dev = interface_to_usbdev(intf); |
859 | const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct); | ||
856 | struct peak_usb_adapter *peak_usb_adapter, **pp; | 860 | struct peak_usb_adapter *peak_usb_adapter, **pp; |
857 | int i, err = -ENOMEM; | 861 | int i, err = -ENOMEM; |
858 | 862 | ||
@@ -860,7 +864,7 @@ static int peak_usb_probe(struct usb_interface *intf, | |||
860 | 864 | ||
861 | /* get corresponding PCAN-USB adapter */ | 865 | /* get corresponding PCAN-USB adapter */ |
862 | for (pp = peak_usb_adapters_list; *pp; pp++) | 866 | for (pp = peak_usb_adapters_list; *pp; pp++) |
863 | if ((*pp)->device_id == usb_dev->descriptor.idProduct) | 867 | if ((*pp)->device_id == usb_id_product) |
864 | break; | 868 | break; |
865 | 869 | ||
866 | peak_usb_adapter = *pp; | 870 | peak_usb_adapter = *pp; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 263dd921edc4..4cfa3b8605b1 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
@@ -78,8 +78,8 @@ struct pcan_usb_pro_msg { | |||
78 | int rec_buffer_size; | 78 | int rec_buffer_size; |
79 | int rec_buffer_len; | 79 | int rec_buffer_len; |
80 | union { | 80 | union { |
81 | u16 *rec_cnt_rd; | 81 | __le16 *rec_cnt_rd; |
82 | u32 *rec_cnt; | 82 | __le32 *rec_cnt; |
83 | u8 *rec_buffer; | 83 | u8 *rec_buffer; |
84 | } u; | 84 | } u; |
85 | }; | 85 | }; |
@@ -155,7 +155,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) | |||
155 | *pc++ = va_arg(ap, int); | 155 | *pc++ = va_arg(ap, int); |
156 | *pc++ = va_arg(ap, int); | 156 | *pc++ = va_arg(ap, int); |
157 | *pc++ = va_arg(ap, int); | 157 | *pc++ = va_arg(ap, int); |
158 | *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); | 158 | *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); |
159 | pc += 4; | 159 | pc += 4; |
160 | memcpy(pc, va_arg(ap, int *), i); | 160 | memcpy(pc, va_arg(ap, int *), i); |
161 | pc += i; | 161 | pc += i; |
@@ -165,7 +165,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) | |||
165 | case PCAN_USBPRO_GETDEVID: | 165 | case PCAN_USBPRO_GETDEVID: |
166 | *pc++ = va_arg(ap, int); | 166 | *pc++ = va_arg(ap, int); |
167 | pc += 2; | 167 | pc += 2; |
168 | *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); | 168 | *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); |
169 | pc += 4; | 169 | pc += 4; |
170 | break; | 170 | break; |
171 | 171 | ||
@@ -173,21 +173,21 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) | |||
173 | case PCAN_USBPRO_SETBUSACT: | 173 | case PCAN_USBPRO_SETBUSACT: |
174 | case PCAN_USBPRO_SETSILENT: | 174 | case PCAN_USBPRO_SETSILENT: |
175 | *pc++ = va_arg(ap, int); | 175 | *pc++ = va_arg(ap, int); |
176 | *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); | 176 | *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); |
177 | pc += 2; | 177 | pc += 2; |
178 | break; | 178 | break; |
179 | 179 | ||
180 | case PCAN_USBPRO_SETLED: | 180 | case PCAN_USBPRO_SETLED: |
181 | *pc++ = va_arg(ap, int); | 181 | *pc++ = va_arg(ap, int); |
182 | *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); | 182 | *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); |
183 | pc += 2; | 183 | pc += 2; |
184 | *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); | 184 | *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); |
185 | pc += 4; | 185 | pc += 4; |
186 | break; | 186 | break; |
187 | 187 | ||
188 | case PCAN_USBPRO_SETTS: | 188 | case PCAN_USBPRO_SETTS: |
189 | pc++; | 189 | pc++; |
190 | *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); | 190 | *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); |
191 | pc += 2; | 191 | pc += 2; |
192 | break; | 192 | break; |
193 | 193 | ||
@@ -200,7 +200,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) | |||
200 | 200 | ||
201 | len = pc - pm->rec_ptr; | 201 | len = pc - pm->rec_ptr; |
202 | if (len > 0) { | 202 | if (len > 0) { |
203 | *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1); | 203 | *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1); |
204 | *pm->rec_ptr = id; | 204 | *pm->rec_ptr = id; |
205 | 205 | ||
206 | pm->rec_ptr = pc; | 206 | pm->rec_ptr = pc; |
@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, | |||
333 | if (!(dev->state & PCAN_USB_STATE_CONNECTED)) | 333 | if (!(dev->state & PCAN_USB_STATE_CONNECTED)) |
334 | return 0; | 334 | return 0; |
335 | 335 | ||
336 | memset(req_addr, '\0', req_size); | ||
337 | |||
338 | req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; | 336 | req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; |
339 | 337 | ||
340 | switch (req_id) { | 338 | switch (req_id) { |
@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, | |||
345 | default: | 343 | default: |
346 | p = usb_rcvctrlpipe(dev->udev, 0); | 344 | p = usb_rcvctrlpipe(dev->udev, 0); |
347 | req_type |= USB_DIR_IN; | 345 | req_type |= USB_DIR_IN; |
346 | memset(req_addr, '\0', req_size); | ||
348 | break; | 347 | break; |
349 | } | 348 | } |
350 | 349 | ||
@@ -572,7 +571,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, | |||
572 | static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, | 571 | static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, |
573 | struct pcan_usb_pro_rxstatus *er) | 572 | struct pcan_usb_pro_rxstatus *er) |
574 | { | 573 | { |
575 | const u32 raw_status = le32_to_cpu(er->status); | 574 | const u16 raw_status = le16_to_cpu(er->status); |
576 | const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; | 575 | const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; |
577 | struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; | 576 | struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; |
578 | struct net_device *netdev = dev->netdev; | 577 | struct net_device *netdev = dev->netdev; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h index 32275af547e0..837cee267132 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h | |||
@@ -33,27 +33,27 @@ | |||
33 | 33 | ||
34 | /* PCAN_USBPRO_INFO_BL vendor request record type */ | 34 | /* PCAN_USBPRO_INFO_BL vendor request record type */ |
35 | struct __packed pcan_usb_pro_blinfo { | 35 | struct __packed pcan_usb_pro_blinfo { |
36 | u32 ctrl_type; | 36 | __le32 ctrl_type; |
37 | u8 version[4]; | 37 | u8 version[4]; |
38 | u8 day; | 38 | u8 day; |
39 | u8 month; | 39 | u8 month; |
40 | u8 year; | 40 | u8 year; |
41 | u8 dummy; | 41 | u8 dummy; |
42 | u32 serial_num_hi; | 42 | __le32 serial_num_hi; |
43 | u32 serial_num_lo; | 43 | __le32 serial_num_lo; |
44 | u32 hw_type; | 44 | __le32 hw_type; |
45 | u32 hw_rev; | 45 | __le32 hw_rev; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* PCAN_USBPRO_INFO_FW vendor request record type */ | 48 | /* PCAN_USBPRO_INFO_FW vendor request record type */ |
49 | struct __packed pcan_usb_pro_fwinfo { | 49 | struct __packed pcan_usb_pro_fwinfo { |
50 | u32 ctrl_type; | 50 | __le32 ctrl_type; |
51 | u8 version[4]; | 51 | u8 version[4]; |
52 | u8 day; | 52 | u8 day; |
53 | u8 month; | 53 | u8 month; |
54 | u8 year; | 54 | u8 year; |
55 | u8 dummy; | 55 | u8 dummy; |
56 | u32 fw_type; | 56 | __le32 fw_type; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | /* | 59 | /* |
@@ -80,46 +80,46 @@ struct __packed pcan_usb_pro_fwinfo { | |||
80 | struct __packed pcan_usb_pro_btr { | 80 | struct __packed pcan_usb_pro_btr { |
81 | u8 data_type; | 81 | u8 data_type; |
82 | u8 channel; | 82 | u8 channel; |
83 | u16 dummy; | 83 | __le16 dummy; |
84 | u32 CCBT; | 84 | __le32 CCBT; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct __packed pcan_usb_pro_busact { | 87 | struct __packed pcan_usb_pro_busact { |
88 | u8 data_type; | 88 | u8 data_type; |
89 | u8 channel; | 89 | u8 channel; |
90 | u16 onoff; | 90 | __le16 onoff; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | struct __packed pcan_usb_pro_silent { | 93 | struct __packed pcan_usb_pro_silent { |
94 | u8 data_type; | 94 | u8 data_type; |
95 | u8 channel; | 95 | u8 channel; |
96 | u16 onoff; | 96 | __le16 onoff; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | struct __packed pcan_usb_pro_filter { | 99 | struct __packed pcan_usb_pro_filter { |
100 | u8 data_type; | 100 | u8 data_type; |
101 | u8 dummy; | 101 | u8 dummy; |
102 | u16 filter_mode; | 102 | __le16 filter_mode; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | struct __packed pcan_usb_pro_setts { | 105 | struct __packed pcan_usb_pro_setts { |
106 | u8 data_type; | 106 | u8 data_type; |
107 | u8 dummy; | 107 | u8 dummy; |
108 | u16 mode; | 108 | __le16 mode; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | struct __packed pcan_usb_pro_devid { | 111 | struct __packed pcan_usb_pro_devid { |
112 | u8 data_type; | 112 | u8 data_type; |
113 | u8 channel; | 113 | u8 channel; |
114 | u16 dummy; | 114 | __le16 dummy; |
115 | u32 serial_num; | 115 | __le32 serial_num; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | struct __packed pcan_usb_pro_setled { | 118 | struct __packed pcan_usb_pro_setled { |
119 | u8 data_type; | 119 | u8 data_type; |
120 | u8 channel; | 120 | u8 channel; |
121 | u16 mode; | 121 | __le16 mode; |
122 | u32 timeout; | 122 | __le32 timeout; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | struct __packed pcan_usb_pro_rxmsg { | 125 | struct __packed pcan_usb_pro_rxmsg { |
@@ -127,8 +127,8 @@ struct __packed pcan_usb_pro_rxmsg { | |||
127 | u8 client; | 127 | u8 client; |
128 | u8 flags; | 128 | u8 flags; |
129 | u8 len; | 129 | u8 len; |
130 | u32 ts32; | 130 | __le32 ts32; |
131 | u32 id; | 131 | __le32 id; |
132 | 132 | ||
133 | u8 data[8]; | 133 | u8 data[8]; |
134 | }; | 134 | }; |
@@ -141,15 +141,15 @@ struct __packed pcan_usb_pro_rxmsg { | |||
141 | struct __packed pcan_usb_pro_rxstatus { | 141 | struct __packed pcan_usb_pro_rxstatus { |
142 | u8 data_type; | 142 | u8 data_type; |
143 | u8 channel; | 143 | u8 channel; |
144 | u16 status; | 144 | __le16 status; |
145 | u32 ts32; | 145 | __le32 ts32; |
146 | u32 err_frm; | 146 | __le32 err_frm; |
147 | }; | 147 | }; |
148 | 148 | ||
149 | struct __packed pcan_usb_pro_rxts { | 149 | struct __packed pcan_usb_pro_rxts { |
150 | u8 data_type; | 150 | u8 data_type; |
151 | u8 dummy[3]; | 151 | u8 dummy[3]; |
152 | u32 ts64[2]; | 152 | __le32 ts64[2]; |
153 | }; | 153 | }; |
154 | 154 | ||
155 | struct __packed pcan_usb_pro_txmsg { | 155 | struct __packed pcan_usb_pro_txmsg { |
@@ -157,7 +157,7 @@ struct __packed pcan_usb_pro_txmsg { | |||
157 | u8 client; | 157 | u8 client; |
158 | u8 flags; | 158 | u8 flags; |
159 | u8 len; | 159 | u8 len; |
160 | u32 id; | 160 | __le32 id; |
161 | u8 data[8]; | 161 | u8 data[8]; |
162 | }; | 162 | }; |
163 | 163 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 51b68d1299fe..a50891f52197 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c | |||
@@ -473,7 +473,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata, | |||
473 | memset(&rdata->tx, 0, sizeof(rdata->tx)); | 473 | memset(&rdata->tx, 0, sizeof(rdata->tx)); |
474 | memset(&rdata->rx, 0, sizeof(rdata->rx)); | 474 | memset(&rdata->rx, 0, sizeof(rdata->rx)); |
475 | 475 | ||
476 | rdata->interrupt = 0; | ||
477 | rdata->mapped_as_page = 0; | 476 | rdata->mapped_as_page = 0; |
478 | 477 | ||
479 | if (rdata->state_saved) { | 478 | if (rdata->state_saved) { |
@@ -597,7 +596,11 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) | |||
597 | } | 596 | } |
598 | } | 597 | } |
599 | 598 | ||
600 | /* Save the skb address in the last entry */ | 599 | /* Save the skb address in the last entry. We always have some data |
600 | * that has been mapped so rdata is always advanced past the last | ||
601 | * piece of mapped data - use the entry pointed to by cur_index - 1. | ||
602 | */ | ||
603 | rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); | ||
601 | rdata->skb = skb; | 604 | rdata->skb = skb; |
602 | 605 | ||
603 | /* Save the number of descriptor entries used */ | 606 | /* Save the number of descriptor entries used */ |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index bf6bf1118b0f..7bb5f07dbeef 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -1839,7 +1839,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) | |||
1839 | spin_lock_irqsave(&ring->lock, flags); | 1839 | spin_lock_irqsave(&ring->lock, flags); |
1840 | 1840 | ||
1841 | while ((processed < XGBE_TX_DESC_MAX_PROC) && | 1841 | while ((processed < XGBE_TX_DESC_MAX_PROC) && |
1842 | (ring->dirty < ring->cur)) { | 1842 | (ring->dirty != ring->cur)) { |
1843 | rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); | 1843 | rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); |
1844 | rdesc = rdata->rdesc; | 1844 | rdesc = rdata->rdesc; |
1845 | 1845 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 549549eaf580..778e4cd32571 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -8119,10 +8119,11 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8119 | case SFP_EEPROM_CON_TYPE_VAL_LC: | 8119 | case SFP_EEPROM_CON_TYPE_VAL_LC: |
8120 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: | 8120 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: |
8121 | check_limiting_mode = 1; | 8121 | check_limiting_mode = 1; |
8122 | if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & | 8122 | if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & |
8123 | (SFP_EEPROM_10G_COMP_CODE_SR_MASK | | 8123 | (SFP_EEPROM_10G_COMP_CODE_SR_MASK | |
8124 | SFP_EEPROM_10G_COMP_CODE_LR_MASK | | 8124 | SFP_EEPROM_10G_COMP_CODE_LR_MASK | |
8125 | SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { | 8125 | SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) && |
8126 | (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) { | ||
8126 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); | 8127 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); |
8127 | phy->media_type = ETH_PHY_SFP_1G_FIBER; | 8128 | phy->media_type = ETH_PHY_SFP_1G_FIBER; |
8128 | if (phy->req_line_speed != SPEED_1000) { | 8129 | if (phy->req_line_speed != SPEED_1000) { |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 07c636815127..691f0bf09ee1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <net/ip.h> | 45 | #include <net/ip.h> |
46 | #include <net/ipv6.h> | 46 | #include <net/ipv6.h> |
47 | #include <net/tcp.h> | 47 | #include <net/tcp.h> |
48 | #include <net/vxlan.h> | ||
48 | #include <net/checksum.h> | 49 | #include <net/checksum.h> |
49 | #include <net/ip6_checksum.h> | 50 | #include <net/ip6_checksum.h> |
50 | #include <linux/workqueue.h> | 51 | #include <linux/workqueue.h> |
@@ -12552,6 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev, | |||
12552 | return 0; | 12553 | return 0; |
12553 | } | 12554 | } |
12554 | 12555 | ||
12556 | static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev) | ||
12557 | { | ||
12558 | return vxlan_gso_check(skb); | ||
12559 | } | ||
12560 | |||
12555 | static const struct net_device_ops bnx2x_netdev_ops = { | 12561 | static const struct net_device_ops bnx2x_netdev_ops = { |
12556 | .ndo_open = bnx2x_open, | 12562 | .ndo_open = bnx2x_open, |
12557 | .ndo_stop = bnx2x_close, | 12563 | .ndo_stop = bnx2x_close, |
@@ -12583,6 +12589,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
12583 | #endif | 12589 | #endif |
12584 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, | 12590 | .ndo_get_phys_port_id = bnx2x_get_phys_port_id, |
12585 | .ndo_set_vf_link_state = bnx2x_set_vf_link_state, | 12591 | .ndo_set_vf_link_state = bnx2x_set_vf_link_state, |
12592 | .ndo_gso_check = bnx2x_gso_check, | ||
12586 | }; | 12593 | }; |
12587 | 12594 | ||
12588 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) | 12595 | static int bnx2x_set_coherency_mask(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c38a93607ea2..2c37e1bf253a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -50,13 +50,13 @@ | |||
50 | #include "cxgb4_uld.h" | 50 | #include "cxgb4_uld.h" |
51 | 51 | ||
52 | #define T4FW_VERSION_MAJOR 0x01 | 52 | #define T4FW_VERSION_MAJOR 0x01 |
53 | #define T4FW_VERSION_MINOR 0x0B | 53 | #define T4FW_VERSION_MINOR 0x0C |
54 | #define T4FW_VERSION_MICRO 0x1B | 54 | #define T4FW_VERSION_MICRO 0x19 |
55 | #define T4FW_VERSION_BUILD 0x00 | 55 | #define T4FW_VERSION_BUILD 0x00 |
56 | 56 | ||
57 | #define T5FW_VERSION_MAJOR 0x01 | 57 | #define T5FW_VERSION_MAJOR 0x01 |
58 | #define T5FW_VERSION_MINOR 0x0B | 58 | #define T5FW_VERSION_MINOR 0x0C |
59 | #define T5FW_VERSION_MICRO 0x1B | 59 | #define T5FW_VERSION_MICRO 0x19 |
60 | #define T5FW_VERSION_BUILD 0x00 | 60 | #define T5FW_VERSION_BUILD 0x00 |
61 | 61 | ||
62 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) | 62 | #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4c26be97fc9a..c8c5b3d36d4e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -2354,9 +2354,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) | |||
2354 | SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | | 2354 | SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | |
2355 | SUPPORTED_10000baseKX4_Full; | 2355 | SUPPORTED_10000baseKX4_Full; |
2356 | else if (type == FW_PORT_TYPE_FIBER_XFI || | 2356 | else if (type == FW_PORT_TYPE_FIBER_XFI || |
2357 | type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) | 2357 | type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) { |
2358 | v |= SUPPORTED_FIBRE; | 2358 | v |= SUPPORTED_FIBRE; |
2359 | else if (type == FW_PORT_TYPE_BP40_BA) | 2359 | if (caps & FW_PORT_CAP_SPEED_1G) |
2360 | v |= SUPPORTED_1000baseT_Full; | ||
2361 | if (caps & FW_PORT_CAP_SPEED_10G) | ||
2362 | v |= SUPPORTED_10000baseT_Full; | ||
2363 | } else if (type == FW_PORT_TYPE_BP40_BA) | ||
2360 | v |= SUPPORTED_40000baseSR4_Full; | 2364 | v |= SUPPORTED_40000baseSR4_Full; |
2361 | 2365 | ||
2362 | if (caps & FW_PORT_CAP_ANEG) | 2366 | if (caps & FW_PORT_CAP_ANEG) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a9323bdb3585..67345c73e570 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -1132,6 +1132,27 @@ unsigned int t4_flash_cfg_addr(struct adapter *adapter) | |||
1132 | return FLASH_CFG_START; | 1132 | return FLASH_CFG_START; |
1133 | } | 1133 | } |
1134 | 1134 | ||
1135 | /* Return TRUE if the specified firmware matches the adapter. I.e. T4 | ||
1136 | * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead | ||
1137 | * and emit an error message for mismatched firmware to save our caller the | ||
1138 | * effort ... | ||
1139 | */ | ||
1140 | static bool t4_fw_matches_chip(const struct adapter *adap, | ||
1141 | const struct fw_hdr *hdr) | ||
1142 | { | ||
1143 | /* The expression below will return FALSE for any unsupported adapter | ||
1144 | * which will keep us "honest" in the future ... | ||
1145 | */ | ||
1146 | if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || | ||
1147 | (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) | ||
1148 | return true; | ||
1149 | |||
1150 | dev_err(adap->pdev_dev, | ||
1151 | "FW image (%d) is not suitable for this adapter (%d)\n", | ||
1152 | hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); | ||
1153 | return false; | ||
1154 | } | ||
1155 | |||
1135 | /** | 1156 | /** |
1136 | * t4_load_fw - download firmware | 1157 | * t4_load_fw - download firmware |
1137 | * @adap: the adapter | 1158 | * @adap: the adapter |
@@ -1171,6 +1192,8 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) | |||
1171 | FW_MAX_SIZE); | 1192 | FW_MAX_SIZE); |
1172 | return -EFBIG; | 1193 | return -EFBIG; |
1173 | } | 1194 | } |
1195 | if (!t4_fw_matches_chip(adap, hdr)) | ||
1196 | return -EINVAL; | ||
1174 | 1197 | ||
1175 | for (csum = 0, i = 0; i < size / sizeof(csum); i++) | 1198 | for (csum = 0, i = 0; i < size / sizeof(csum); i++) |
1176 | csum += ntohl(p[i]); | 1199 | csum += ntohl(p[i]); |
@@ -3083,6 +3106,9 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, | |||
3083 | const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; | 3106 | const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; |
3084 | int reset, ret; | 3107 | int reset, ret; |
3085 | 3108 | ||
3109 | if (!t4_fw_matches_chip(adap, fw_hdr)) | ||
3110 | return -EINVAL; | ||
3111 | |||
3086 | ret = t4_fw_halt(adap, mbox, force); | 3112 | ret = t4_fw_halt(adap, mbox, force); |
3087 | if (ret < 0 && !force) | 3113 | if (ret < 0 && !force) |
3088 | return ret; | 3114 | return ret; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index ccc3ce2e8c8c..96208f17bb53 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -216,7 +216,7 @@ | |||
216 | /* Various constants */ | 216 | /* Various constants */ |
217 | 217 | ||
218 | /* Coalescing */ | 218 | /* Coalescing */ |
219 | #define MVNETA_TXDONE_COAL_PKTS 16 | 219 | #define MVNETA_TXDONE_COAL_PKTS 1 |
220 | #define MVNETA_RX_COAL_PKTS 32 | 220 | #define MVNETA_RX_COAL_PKTS 32 |
221 | #define MVNETA_RX_COAL_USEC 100 | 221 | #define MVNETA_RX_COAL_USEC 100 |
222 | 222 | ||
@@ -1721,6 +1721,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | |||
1721 | u16 txq_id = skb_get_queue_mapping(skb); | 1721 | u16 txq_id = skb_get_queue_mapping(skb); |
1722 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; | 1722 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; |
1723 | struct mvneta_tx_desc *tx_desc; | 1723 | struct mvneta_tx_desc *tx_desc; |
1724 | int len = skb->len; | ||
1724 | int frags = 0; | 1725 | int frags = 0; |
1725 | u32 tx_cmd; | 1726 | u32 tx_cmd; |
1726 | 1727 | ||
@@ -1788,7 +1789,7 @@ out: | |||
1788 | 1789 | ||
1789 | u64_stats_update_begin(&stats->syncp); | 1790 | u64_stats_update_begin(&stats->syncp); |
1790 | stats->tx_packets++; | 1791 | stats->tx_packets++; |
1791 | stats->tx_bytes += skb->len; | 1792 | stats->tx_bytes += len; |
1792 | u64_stats_update_end(&stats->syncp); | 1793 | u64_stats_update_end(&stats->syncp); |
1793 | } else { | 1794 | } else { |
1794 | dev->stats.tx_dropped++; | 1795 | dev->stats.tx_dropped++; |
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 38f7ceee77d2..af829c578400 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -1173,8 +1173,8 @@ static int pxa168_eth_open(struct net_device *dev) | |||
1173 | pep->rx_used_desc_q = 0; | 1173 | pep->rx_used_desc_q = 0; |
1174 | pep->rx_curr_desc_q = 0; | 1174 | pep->rx_curr_desc_q = 0; |
1175 | netif_carrier_off(dev); | 1175 | netif_carrier_off(dev); |
1176 | eth_port_start(dev); | ||
1177 | napi_enable(&pep->napi); | 1176 | napi_enable(&pep->napi); |
1177 | eth_port_start(dev); | ||
1178 | return 0; | 1178 | return 0; |
1179 | out_free_rx_skb: | 1179 | out_free_rx_skb: |
1180 | rxq_deinit(dev); | 1180 | rxq_deinit(dev); |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 264eab7d3b26..7173836fe361 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -3433,10 +3433,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id) | |||
3433 | 3433 | ||
3434 | if (status & IS_HW_ERR) | 3434 | if (status & IS_HW_ERR) |
3435 | skge_error_irq(hw); | 3435 | skge_error_irq(hw); |
3436 | 3436 | out: | |
3437 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 3437 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
3438 | skge_read32(hw, B0_IMSK); | 3438 | skge_read32(hw, B0_IMSK); |
3439 | out: | ||
3440 | spin_unlock(&hw->hw_lock); | 3439 | spin_unlock(&hw->hw_lock); |
3441 | 3440 | ||
3442 | return IRQ_RETVAL(handled); | 3441 | return IRQ_RETVAL(handled); |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f8ab220bd72c..867a6a3ef81f 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -2416,6 +2416,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
2416 | 2416 | ||
2417 | imask = sky2_read32(hw, B0_IMSK); | 2417 | imask = sky2_read32(hw, B0_IMSK); |
2418 | sky2_write32(hw, B0_IMSK, 0); | 2418 | sky2_write32(hw, B0_IMSK, 0); |
2419 | sky2_read32(hw, B0_IMSK); | ||
2419 | 2420 | ||
2420 | dev->trans_start = jiffies; /* prevent tx timeout */ | 2421 | dev->trans_start = jiffies; /* prevent tx timeout */ |
2421 | napi_disable(&hw->napi); | 2422 | napi_disable(&hw->napi); |
@@ -3484,8 +3485,8 @@ static void sky2_all_down(struct sky2_hw *hw) | |||
3484 | int i; | 3485 | int i; |
3485 | 3486 | ||
3486 | if (hw->flags & SKY2_HW_IRQ_SETUP) { | 3487 | if (hw->flags & SKY2_HW_IRQ_SETUP) { |
3487 | sky2_read32(hw, B0_IMSK); | ||
3488 | sky2_write32(hw, B0_IMSK, 0); | 3488 | sky2_write32(hw, B0_IMSK, 0); |
3489 | sky2_read32(hw, B0_IMSK); | ||
3489 | 3490 | ||
3490 | synchronize_irq(hw->pdev->irq); | 3491 | synchronize_irq(hw->pdev->irq); |
3491 | napi_disable(&hw->napi); | 3492 | napi_disable(&hw->napi); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ad2e285aefd4..c29ba80ae02b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -918,21 +918,13 @@ static int sh_eth_reset(struct net_device *ndev) | |||
918 | return ret; | 918 | return ret; |
919 | } | 919 | } |
920 | 920 | ||
921 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
922 | static void sh_eth_set_receive_align(struct sk_buff *skb) | 921 | static void sh_eth_set_receive_align(struct sk_buff *skb) |
923 | { | 922 | { |
924 | int reserve; | 923 | uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); |
925 | 924 | ||
926 | reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); | ||
927 | if (reserve) | 925 | if (reserve) |
928 | skb_reserve(skb, reserve); | 926 | skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); |
929 | } | 927 | } |
930 | #else | ||
931 | static void sh_eth_set_receive_align(struct sk_buff *skb) | ||
932 | { | ||
933 | skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); | ||
934 | } | ||
935 | #endif | ||
936 | 928 | ||
937 | 929 | ||
938 | /* CPU <-> EDMAC endian convert */ | 930 | /* CPU <-> EDMAC endian convert */ |
@@ -1120,6 +1112,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1120 | struct sh_eth_txdesc *txdesc = NULL; | 1112 | struct sh_eth_txdesc *txdesc = NULL; |
1121 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1113 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
1122 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1114 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
1115 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1123 | 1116 | ||
1124 | mdp->cur_rx = 0; | 1117 | mdp->cur_rx = 0; |
1125 | mdp->cur_tx = 0; | 1118 | mdp->cur_tx = 0; |
@@ -1132,21 +1125,21 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1132 | for (i = 0; i < mdp->num_rx_ring; i++) { | 1125 | for (i = 0; i < mdp->num_rx_ring; i++) { |
1133 | /* skb */ | 1126 | /* skb */ |
1134 | mdp->rx_skbuff[i] = NULL; | 1127 | mdp->rx_skbuff[i] = NULL; |
1135 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1128 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1136 | mdp->rx_skbuff[i] = skb; | 1129 | mdp->rx_skbuff[i] = skb; |
1137 | if (skb == NULL) | 1130 | if (skb == NULL) |
1138 | break; | 1131 | break; |
1139 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1140 | DMA_FROM_DEVICE); | ||
1141 | sh_eth_set_receive_align(skb); | 1132 | sh_eth_set_receive_align(skb); |
1142 | 1133 | ||
1143 | /* RX descriptor */ | 1134 | /* RX descriptor */ |
1144 | rxdesc = &mdp->rx_ring[i]; | 1135 | rxdesc = &mdp->rx_ring[i]; |
1136 | /* The size of the buffer is a multiple of 16 bytes. */ | ||
1137 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1138 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | ||
1139 | DMA_FROM_DEVICE); | ||
1145 | rxdesc->addr = virt_to_phys(skb->data); | 1140 | rxdesc->addr = virt_to_phys(skb->data); |
1146 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1141 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1147 | 1142 | ||
1148 | /* The size of the buffer is 16 byte boundary. */ | ||
1149 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1150 | /* Rx descriptor address set */ | 1143 | /* Rx descriptor address set */ |
1151 | if (i == 0) { | 1144 | if (i == 0) { |
1152 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); | 1145 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); |
@@ -1399,6 +1392,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1399 | struct sk_buff *skb; | 1392 | struct sk_buff *skb; |
1400 | u16 pkt_len = 0; | 1393 | u16 pkt_len = 0; |
1401 | u32 desc_status; | 1394 | u32 desc_status; |
1395 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1402 | 1396 | ||
1403 | boguscnt = min(boguscnt, *quota); | 1397 | boguscnt = min(boguscnt, *quota); |
1404 | limit = boguscnt; | 1398 | limit = boguscnt; |
@@ -1447,7 +1441,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1447 | if (mdp->cd->rpadir) | 1441 | if (mdp->cd->rpadir) |
1448 | skb_reserve(skb, NET_IP_ALIGN); | 1442 | skb_reserve(skb, NET_IP_ALIGN); |
1449 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1443 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, |
1450 | mdp->rx_buf_sz, | 1444 | ALIGN(mdp->rx_buf_sz, 16), |
1451 | DMA_FROM_DEVICE); | 1445 | DMA_FROM_DEVICE); |
1452 | skb_put(skb, pkt_len); | 1446 | skb_put(skb, pkt_len); |
1453 | skb->protocol = eth_type_trans(skb, ndev); | 1447 | skb->protocol = eth_type_trans(skb, ndev); |
@@ -1467,13 +1461,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1467 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1461 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
1468 | 1462 | ||
1469 | if (mdp->rx_skbuff[entry] == NULL) { | 1463 | if (mdp->rx_skbuff[entry] == NULL) { |
1470 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1464 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1471 | mdp->rx_skbuff[entry] = skb; | 1465 | mdp->rx_skbuff[entry] = skb; |
1472 | if (skb == NULL) | 1466 | if (skb == NULL) |
1473 | break; /* Better luck next round. */ | 1467 | break; /* Better luck next round. */ |
1474 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1475 | DMA_FROM_DEVICE); | ||
1476 | sh_eth_set_receive_align(skb); | 1468 | sh_eth_set_receive_align(skb); |
1469 | dma_map_single(&ndev->dev, skb->data, | ||
1470 | rxdesc->buffer_length, DMA_FROM_DEVICE); | ||
1477 | 1471 | ||
1478 | skb_checksum_none_assert(skb); | 1472 | skb_checksum_none_assert(skb); |
1479 | rxdesc->addr = virt_to_phys(skb->data); | 1473 | rxdesc->addr = virt_to_phys(skb->data); |
@@ -2043,6 +2037,8 @@ static int sh_eth_open(struct net_device *ndev) | |||
2043 | if (ret) | 2037 | if (ret) |
2044 | goto out_free_irq; | 2038 | goto out_free_irq; |
2045 | 2039 | ||
2040 | mdp->is_opened = 1; | ||
2041 | |||
2046 | return ret; | 2042 | return ret; |
2047 | 2043 | ||
2048 | out_free_irq: | 2044 | out_free_irq: |
@@ -2132,6 +2128,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2132 | return NETDEV_TX_OK; | 2128 | return NETDEV_TX_OK; |
2133 | } | 2129 | } |
2134 | 2130 | ||
2131 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2132 | { | ||
2133 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2134 | |||
2135 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2136 | return &ndev->stats; | ||
2137 | |||
2138 | if (!mdp->is_opened) | ||
2139 | return &ndev->stats; | ||
2140 | |||
2141 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2142 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2143 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2144 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2145 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2146 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2147 | |||
2148 | if (sh_eth_is_gether(mdp)) { | ||
2149 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2150 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2151 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2152 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2153 | } else { | ||
2154 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2155 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2156 | } | ||
2157 | |||
2158 | return &ndev->stats; | ||
2159 | } | ||
2160 | |||
2135 | /* device close function */ | 2161 | /* device close function */ |
2136 | static int sh_eth_close(struct net_device *ndev) | 2162 | static int sh_eth_close(struct net_device *ndev) |
2137 | { | 2163 | { |
@@ -2146,6 +2172,7 @@ static int sh_eth_close(struct net_device *ndev) | |||
2146 | sh_eth_write(ndev, 0, EDTRR); | 2172 | sh_eth_write(ndev, 0, EDTRR); |
2147 | sh_eth_write(ndev, 0, EDRRR); | 2173 | sh_eth_write(ndev, 0, EDRRR); |
2148 | 2174 | ||
2175 | sh_eth_get_stats(ndev); | ||
2149 | /* PHY Disconnect */ | 2176 | /* PHY Disconnect */ |
2150 | if (mdp->phydev) { | 2177 | if (mdp->phydev) { |
2151 | phy_stop(mdp->phydev); | 2178 | phy_stop(mdp->phydev); |
@@ -2164,36 +2191,9 @@ static int sh_eth_close(struct net_device *ndev) | |||
2164 | 2191 | ||
2165 | pm_runtime_put_sync(&mdp->pdev->dev); | 2192 | pm_runtime_put_sync(&mdp->pdev->dev); |
2166 | 2193 | ||
2167 | return 0; | 2194 | mdp->is_opened = 0; |
2168 | } | ||
2169 | |||
2170 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2171 | { | ||
2172 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2173 | |||
2174 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2175 | return &ndev->stats; | ||
2176 | 2195 | ||
2177 | pm_runtime_get_sync(&mdp->pdev->dev); | 2196 | return 0; |
2178 | |||
2179 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2180 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2181 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2182 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2183 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2184 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2185 | if (sh_eth_is_gether(mdp)) { | ||
2186 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2187 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2188 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2189 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2190 | } else { | ||
2191 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2192 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2193 | } | ||
2194 | pm_runtime_put_sync(&mdp->pdev->dev); | ||
2195 | |||
2196 | return &ndev->stats; | ||
2197 | } | 2197 | } |
2198 | 2198 | ||
2199 | /* ioctl to device function */ | 2199 | /* ioctl to device function */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index b37c427144ee..22301bf9c21d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -162,9 +162,9 @@ enum { | |||
162 | 162 | ||
163 | /* Driver's parameters */ | 163 | /* Driver's parameters */ |
164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
165 | #define SH4_SKB_RX_ALIGN 32 | 165 | #define SH_ETH_RX_ALIGN 32 |
166 | #else | 166 | #else |
167 | #define SH2_SH3_SKB_RX_ALIGN 2 | 167 | #define SH_ETH_RX_ALIGN 2 |
168 | #endif | 168 | #endif |
169 | 169 | ||
170 | /* Register's bits | 170 | /* Register's bits |
@@ -522,6 +522,7 @@ struct sh_eth_private { | |||
522 | 522 | ||
523 | unsigned no_ether_link:1; | 523 | unsigned no_ether_link:1; |
524 | unsigned ether_link_active_low:1; | 524 | unsigned ether_link_active_low:1; |
525 | unsigned is_opened:1; | ||
525 | }; | 526 | }; |
526 | 527 | ||
527 | static inline void sh_eth_soft_swap(char *src, int len) | 528 | static inline void sh_eth_soft_swap(char *src, int len) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 3a08a1f78c73..771cda2a48b2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -696,7 +696,7 @@ static int stmmac_set_coalesce(struct net_device *dev, | |||
696 | (ec->tx_max_coalesced_frames == 0)) | 696 | (ec->tx_max_coalesced_frames == 0)) |
697 | return -EINVAL; | 697 | return -EINVAL; |
698 | 698 | ||
699 | if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) || | 699 | if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) || |
700 | (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) | 700 | (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) |
701 | return -EINVAL; | 701 | return -EINVAL; |
702 | 702 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 07054ce84ba8..4032b170fe24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -276,6 +276,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
276 | 276 | ||
277 | plat_dat = dev_get_platdata(&pdev->dev); | 277 | plat_dat = dev_get_platdata(&pdev->dev); |
278 | 278 | ||
279 | if (!plat_dat) | ||
280 | plat_dat = devm_kzalloc(&pdev->dev, | ||
281 | sizeof(struct plat_stmmacenet_data), | ||
282 | GFP_KERNEL); | ||
283 | if (!plat_dat) { | ||
284 | pr_err("%s: ERROR: no memory", __func__); | ||
285 | return -ENOMEM; | ||
286 | } | ||
287 | |||
279 | /* Set default value for multicast hash bins */ | 288 | /* Set default value for multicast hash bins */ |
280 | plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; | 289 | plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; |
281 | 290 | ||
@@ -283,15 +292,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
283 | plat_dat->unicast_filter_entries = 1; | 292 | plat_dat->unicast_filter_entries = 1; |
284 | 293 | ||
285 | if (pdev->dev.of_node) { | 294 | if (pdev->dev.of_node) { |
286 | if (!plat_dat) | ||
287 | plat_dat = devm_kzalloc(&pdev->dev, | ||
288 | sizeof(struct plat_stmmacenet_data), | ||
289 | GFP_KERNEL); | ||
290 | if (!plat_dat) { | ||
291 | pr_err("%s: ERROR: no memory", __func__); | ||
292 | return -ENOMEM; | ||
293 | } | ||
294 | |||
295 | ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); | 295 | ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); |
296 | if (ret) { | 296 | if (ret) { |
297 | pr_err("%s: main dt probe failed", __func__); | 297 | pr_err("%s: main dt probe failed", __func__); |
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index fab0d4b42f58..d44cd19169bd 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
@@ -404,6 +404,7 @@ static int backend_create_xenvif(struct backend_info *be) | |||
404 | int err; | 404 | int err; |
405 | long handle; | 405 | long handle; |
406 | struct xenbus_device *dev = be->dev; | 406 | struct xenbus_device *dev = be->dev; |
407 | struct xenvif *vif; | ||
407 | 408 | ||
408 | if (be->vif != NULL) | 409 | if (be->vif != NULL) |
409 | return 0; | 410 | return 0; |
@@ -414,13 +415,13 @@ static int backend_create_xenvif(struct backend_info *be) | |||
414 | return (err < 0) ? err : -EINVAL; | 415 | return (err < 0) ? err : -EINVAL; |
415 | } | 416 | } |
416 | 417 | ||
417 | be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); | 418 | vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); |
418 | if (IS_ERR(be->vif)) { | 419 | if (IS_ERR(vif)) { |
419 | err = PTR_ERR(be->vif); | 420 | err = PTR_ERR(vif); |
420 | be->vif = NULL; | ||
421 | xenbus_dev_fatal(dev, err, "creating interface"); | 421 | xenbus_dev_fatal(dev, err, "creating interface"); |
422 | return err; | 422 | return err; |
423 | } | 423 | } |
424 | be->vif = vif; | ||
424 | 425 | ||
425 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); | 426 | kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); |
426 | return 0; | 427 | return 0; |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 88a70f5ed594..2f0a9ce9ff73 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -473,9 +473,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue, | |||
473 | len = skb_frag_size(frag); | 473 | len = skb_frag_size(frag); |
474 | offset = frag->page_offset; | 474 | offset = frag->page_offset; |
475 | 475 | ||
476 | /* Data must not cross a page boundary. */ | ||
477 | BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); | ||
478 | |||
479 | /* Skip unused frames from start of page */ | 476 | /* Skip unused frames from start of page */ |
480 | page += offset >> PAGE_SHIFT; | 477 | page += offset >> PAGE_SHIFT; |
481 | offset &= ~PAGE_MASK; | 478 | offset &= ~PAGE_MASK; |
@@ -483,8 +480,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue, | |||
483 | while (len > 0) { | 480 | while (len > 0) { |
484 | unsigned long bytes; | 481 | unsigned long bytes; |
485 | 482 | ||
486 | BUG_ON(offset >= PAGE_SIZE); | ||
487 | |||
488 | bytes = PAGE_SIZE - offset; | 483 | bytes = PAGE_SIZE - offset; |
489 | if (bytes > len) | 484 | if (bytes > len) |
490 | bytes = len; | 485 | bytes = len; |
@@ -609,6 +604,9 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
609 | slots, skb->len); | 604 | slots, skb->len); |
610 | if (skb_linearize(skb)) | 605 | if (skb_linearize(skb)) |
611 | goto drop; | 606 | goto drop; |
607 | data = skb->data; | ||
608 | offset = offset_in_page(data); | ||
609 | len = skb_headlen(skb); | ||
612 | } | 610 | } |
613 | 611 | ||
614 | spin_lock_irqsave(&queue->tx_lock, flags); | 612 | spin_lock_irqsave(&queue->tx_lock, flags); |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 30e97bcc4f88..d134710de96d 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) | |||
964 | int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, | 964 | int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, |
965 | phys_addr_t size, bool nomap) | 965 | phys_addr_t size, bool nomap) |
966 | { | 966 | { |
967 | if (memblock_is_region_reserved(base, size)) | ||
968 | return -EBUSY; | ||
969 | if (nomap) | 967 | if (nomap) |
970 | return memblock_remove(base, size); | 968 | return memblock_remove(base, size); |
971 | return memblock_reserve(base, size); | 969 | return memblock_reserve(base, size); |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 3d43874319be..19bb19c7db4a 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -276,6 +276,7 @@ struct tegra_pcie { | |||
276 | 276 | ||
277 | struct resource all; | 277 | struct resource all; |
278 | struct resource io; | 278 | struct resource io; |
279 | struct resource pio; | ||
279 | struct resource mem; | 280 | struct resource mem; |
280 | struct resource prefetch; | 281 | struct resource prefetch; |
281 | struct resource busn; | 282 | struct resource busn; |
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) | |||
658 | { | 659 | { |
659 | struct tegra_pcie *pcie = sys_to_pcie(sys); | 660 | struct tegra_pcie *pcie = sys_to_pcie(sys); |
660 | int err; | 661 | int err; |
661 | phys_addr_t io_start; | ||
662 | 662 | ||
663 | err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); | 663 | err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); |
664 | if (err < 0) | 664 | if (err < 0) |
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys) | |||
668 | if (err) | 668 | if (err) |
669 | return err; | 669 | return err; |
670 | 670 | ||
671 | io_start = pci_pio_to_address(pcie->io.start); | ||
672 | |||
673 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); | 671 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); |
674 | pci_add_resource_offset(&sys->resources, &pcie->prefetch, | 672 | pci_add_resource_offset(&sys->resources, &pcie->prefetch, |
675 | sys->mem_offset); | 673 | sys->mem_offset); |
676 | pci_add_resource(&sys->resources, &pcie->busn); | 674 | pci_add_resource(&sys->resources, &pcie->busn); |
677 | 675 | ||
678 | pci_ioremap_io(nr * SZ_64K, io_start); | 676 | pci_ioremap_io(pcie->pio.start, pcie->io.start); |
679 | 677 | ||
680 | return 1; | 678 | return 1; |
681 | } | 679 | } |
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg) | |||
786 | static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) | 784 | static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) |
787 | { | 785 | { |
788 | u32 fpci_bar, size, axi_address; | 786 | u32 fpci_bar, size, axi_address; |
789 | phys_addr_t io_start = pci_pio_to_address(pcie->io.start); | ||
790 | 787 | ||
791 | /* Bar 0: type 1 extended configuration space */ | 788 | /* Bar 0: type 1 extended configuration space */ |
792 | fpci_bar = 0xfe100000; | 789 | fpci_bar = 0xfe100000; |
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) | |||
799 | /* Bar 1: downstream IO bar */ | 796 | /* Bar 1: downstream IO bar */ |
800 | fpci_bar = 0xfdfc0000; | 797 | fpci_bar = 0xfdfc0000; |
801 | size = resource_size(&pcie->io); | 798 | size = resource_size(&pcie->io); |
802 | axi_address = io_start; | 799 | axi_address = pcie->io.start; |
803 | afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); | 800 | afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); |
804 | afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); | 801 | afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); |
805 | afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); | 802 | afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); |
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) | |||
1690 | 1687 | ||
1691 | switch (res.flags & IORESOURCE_TYPE_BITS) { | 1688 | switch (res.flags & IORESOURCE_TYPE_BITS) { |
1692 | case IORESOURCE_IO: | 1689 | case IORESOURCE_IO: |
1693 | memcpy(&pcie->io, &res, sizeof(res)); | 1690 | memcpy(&pcie->pio, &res, sizeof(res)); |
1694 | pcie->io.name = np->full_name; | 1691 | pcie->pio.name = np->full_name; |
1692 | |||
1693 | /* | ||
1694 | * The Tegra PCIe host bridge uses this to program the | ||
1695 | * mapping of the I/O space to the physical address, | ||
1696 | * so we override the .start and .end fields here that | ||
1697 | * of_pci_range_to_resource() converted to I/O space. | ||
1698 | * We also set the IORESOURCE_MEM type to clarify that | ||
1699 | * the resource is in the physical memory space. | ||
1700 | */ | ||
1701 | pcie->io.start = range.cpu_addr; | ||
1702 | pcie->io.end = range.cpu_addr + range.size - 1; | ||
1703 | pcie->io.flags = IORESOURCE_MEM; | ||
1704 | pcie->io.name = "I/O"; | ||
1705 | |||
1706 | memcpy(&res, &pcie->io, sizeof(res)); | ||
1695 | break; | 1707 | break; |
1696 | 1708 | ||
1697 | case IORESOURCE_MEM: | 1709 | case IORESOURCE_MEM: |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d3220d31d3cb..dcd9be32ac57 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
1011 | bytes = min(bytes, working_bytes); | 1011 | bytes = min(bytes, working_bytes); |
1012 | kaddr = kmap_atomic(page_out); | 1012 | kaddr = kmap_atomic(page_out); |
1013 | memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); | 1013 | memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); |
1014 | if (*pg_index == (vcnt - 1) && *pg_offset == 0) | ||
1015 | memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); | ||
1016 | kunmap_atomic(kaddr); | 1014 | kunmap_atomic(kaddr); |
1017 | flush_dcache_page(page_out); | 1015 | flush_dcache_page(page_out); |
1018 | 1016 | ||
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
1054 | 1052 | ||
1055 | return 1; | 1053 | return 1; |
1056 | } | 1054 | } |
1055 | |||
1056 | /* | ||
1057 | * When uncompressing data, we need to make sure and zero any parts of | ||
1058 | * the biovec that were not filled in by the decompression code. pg_index | ||
1059 | * and pg_offset indicate the last page and the last offset of that page | ||
1060 | * that have been filled in. This will zero everything remaining in the | ||
1061 | * biovec. | ||
1062 | */ | ||
1063 | void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, | ||
1064 | unsigned long pg_index, | ||
1065 | unsigned long pg_offset) | ||
1066 | { | ||
1067 | while (pg_index < vcnt) { | ||
1068 | struct page *page = bvec[pg_index].bv_page; | ||
1069 | unsigned long off = bvec[pg_index].bv_offset; | ||
1070 | unsigned long len = bvec[pg_index].bv_len; | ||
1071 | |||
1072 | if (pg_offset < off) | ||
1073 | pg_offset = off; | ||
1074 | if (pg_offset < off + len) { | ||
1075 | unsigned long bytes = off + len - pg_offset; | ||
1076 | char *kaddr; | ||
1077 | |||
1078 | kaddr = kmap_atomic(page); | ||
1079 | memset(kaddr + pg_offset, 0, bytes); | ||
1080 | kunmap_atomic(kaddr); | ||
1081 | } | ||
1082 | pg_index++; | ||
1083 | pg_offset = 0; | ||
1084 | } | ||
1085 | } | ||
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 0c803b4fbf93..d181f70caae0 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h | |||
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
45 | unsigned long nr_pages); | 45 | unsigned long nr_pages); |
46 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | 46 | int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, |
47 | int mirror_num, unsigned long bio_flags); | 47 | int mirror_num, unsigned long bio_flags); |
48 | 48 | void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, | |
49 | unsigned long pg_index, | ||
50 | unsigned long pg_offset); | ||
49 | struct btrfs_compress_op { | 51 | struct btrfs_compress_op { |
50 | struct list_head *(*alloc_workspace)(void); | 52 | struct list_head *(*alloc_workspace)(void); |
51 | 53 | ||
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 78285f30909e..617553cdb7d3 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c | |||
@@ -373,6 +373,8 @@ cont: | |||
373 | } | 373 | } |
374 | done: | 374 | done: |
375 | kunmap(pages_in[page_in_index]); | 375 | kunmap(pages_in[page_in_index]); |
376 | if (!ret) | ||
377 | btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); | ||
376 | return ret; | 378 | return ret; |
377 | } | 379 | } |
378 | 380 | ||
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, | |||
410 | goto out; | 412 | goto out; |
411 | } | 413 | } |
412 | 414 | ||
415 | /* | ||
416 | * the caller is already checking against PAGE_SIZE, but lets | ||
417 | * move this check closer to the memcpy/memset | ||
418 | */ | ||
419 | destlen = min_t(unsigned long, destlen, PAGE_SIZE); | ||
413 | bytes = min_t(unsigned long, destlen, out_len - start_byte); | 420 | bytes = min_t(unsigned long, destlen, out_len - start_byte); |
414 | 421 | ||
415 | kaddr = kmap_atomic(dest_page); | 422 | kaddr = kmap_atomic(dest_page); |
416 | memcpy(kaddr, workspace->buf + start_byte, bytes); | 423 | memcpy(kaddr, workspace->buf + start_byte, bytes); |
424 | |||
425 | /* | ||
426 | * btrfs_getblock is doing a zero on the tail of the page too, | ||
427 | * but this will cover anything missing from the decompressed | ||
428 | * data. | ||
429 | */ | ||
430 | if (bytes < destlen) | ||
431 | memset(kaddr+bytes, 0, destlen-bytes); | ||
417 | kunmap_atomic(kaddr); | 432 | kunmap_atomic(kaddr); |
418 | out: | 433 | out: |
419 | return ret; | 434 | return ret; |
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 759fa4e2de8f..fb22fd8d8fb8 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c | |||
@@ -299,6 +299,8 @@ done: | |||
299 | zlib_inflateEnd(&workspace->strm); | 299 | zlib_inflateEnd(&workspace->strm); |
300 | if (data_in) | 300 | if (data_in) |
301 | kunmap(pages_in[page_in_index]); | 301 | kunmap(pages_in[page_in_index]); |
302 | if (!ret) | ||
303 | btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); | ||
302 | return ret; | 304 | return ret; |
303 | } | 305 | } |
304 | 306 | ||
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, | |||
310 | struct workspace *workspace = list_entry(ws, struct workspace, list); | 312 | struct workspace *workspace = list_entry(ws, struct workspace, list); |
311 | int ret = 0; | 313 | int ret = 0; |
312 | int wbits = MAX_WBITS; | 314 | int wbits = MAX_WBITS; |
313 | unsigned long bytes_left = destlen; | 315 | unsigned long bytes_left; |
314 | unsigned long total_out = 0; | 316 | unsigned long total_out = 0; |
317 | unsigned long pg_offset = 0; | ||
315 | char *kaddr; | 318 | char *kaddr; |
316 | 319 | ||
320 | destlen = min_t(unsigned long, destlen, PAGE_SIZE); | ||
321 | bytes_left = destlen; | ||
322 | |||
317 | workspace->strm.next_in = data_in; | 323 | workspace->strm.next_in = data_in; |
318 | workspace->strm.avail_in = srclen; | 324 | workspace->strm.avail_in = srclen; |
319 | workspace->strm.total_in = 0; | 325 | workspace->strm.total_in = 0; |
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, | |||
341 | unsigned long buf_start; | 347 | unsigned long buf_start; |
342 | unsigned long buf_offset; | 348 | unsigned long buf_offset; |
343 | unsigned long bytes; | 349 | unsigned long bytes; |
344 | unsigned long pg_offset = 0; | ||
345 | 350 | ||
346 | ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); | 351 | ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); |
347 | if (ret != Z_OK && ret != Z_STREAM_END) | 352 | if (ret != Z_OK && ret != Z_STREAM_END) |
@@ -384,6 +389,17 @@ next: | |||
384 | ret = 0; | 389 | ret = 0; |
385 | 390 | ||
386 | zlib_inflateEnd(&workspace->strm); | 391 | zlib_inflateEnd(&workspace->strm); |
392 | |||
393 | /* | ||
394 | * this should only happen if zlib returned fewer bytes than we | ||
395 | * expected. btrfs_get_block is responsible for zeroing from the | ||
396 | * end of the inline extent (destlen) to the end of the page | ||
397 | */ | ||
398 | if (pg_offset < destlen) { | ||
399 | kaddr = kmap_atomic(dest_page); | ||
400 | memset(kaddr + pg_offset, 0, destlen - pg_offset); | ||
401 | kunmap_atomic(kaddr); | ||
402 | } | ||
387 | return ret; | 403 | return ret; |
388 | } | 404 | } |
389 | 405 | ||
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 6df8d3d885e5..b8b92c2f9683 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c | |||
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, | |||
736 | } | 736 | } |
737 | 737 | ||
738 | alias = d_find_alias(inode); | 738 | alias = d_find_alias(inode); |
739 | if (alias && !vfat_d_anon_disconn(alias)) { | 739 | /* |
740 | * Checking "alias->d_parent == dentry->d_parent" to make sure | ||
741 | * FS is not corrupted (especially double linked dir). | ||
742 | */ | ||
743 | if (alias && alias->d_parent == dentry->d_parent && | ||
744 | !vfat_d_anon_disconn(alias)) { | ||
740 | /* | 745 | /* |
741 | * This inode has non anonymous-DCACHE_DISCONNECTED | 746 | * This inode has non anonymous-DCACHE_DISCONNECTED |
742 | * dentry. This means, the user did ->lookup() by an | 747 | * dentry. This means, the user did ->lookup() by an |
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, | |||
755 | 760 | ||
756 | out: | 761 | out: |
757 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 762 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
758 | dentry->d_time = dentry->d_parent->d_inode->i_version; | 763 | if (!inode) |
759 | dentry = d_splice_alias(inode, dentry); | 764 | dentry->d_time = dir->i_version; |
760 | if (dentry) | 765 | return d_splice_alias(inode, dentry); |
761 | dentry->d_time = dentry->d_parent->d_inode->i_version; | ||
762 | return dentry; | ||
763 | |||
764 | error: | 766 | error: |
765 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 767 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
766 | return ERR_PTR(err); | 768 | return ERR_PTR(err); |
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
793 | inode->i_mtime = inode->i_atime = inode->i_ctime = ts; | 795 | inode->i_mtime = inode->i_atime = inode->i_ctime = ts; |
794 | /* timestamp is already written, so mark_inode_dirty() is unneeded. */ | 796 | /* timestamp is already written, so mark_inode_dirty() is unneeded. */ |
795 | 797 | ||
796 | dentry->d_time = dentry->d_parent->d_inode->i_version; | ||
797 | d_instantiate(dentry, inode); | 798 | d_instantiate(dentry, inode); |
798 | out: | 799 | out: |
799 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 800 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry) | |||
824 | clear_nlink(inode); | 825 | clear_nlink(inode); |
825 | inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; | 826 | inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; |
826 | fat_detach(inode); | 827 | fat_detach(inode); |
828 | dentry->d_time = dir->i_version; | ||
827 | out: | 829 | out: |
828 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 830 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
829 | 831 | ||
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry) | |||
849 | clear_nlink(inode); | 851 | clear_nlink(inode); |
850 | inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; | 852 | inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; |
851 | fat_detach(inode); | 853 | fat_detach(inode); |
854 | dentry->d_time = dir->i_version; | ||
852 | out: | 855 | out: |
853 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 856 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
854 | 857 | ||
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
889 | inode->i_mtime = inode->i_atime = inode->i_ctime = ts; | 892 | inode->i_mtime = inode->i_atime = inode->i_ctime = ts; |
890 | /* timestamp is already written, so mark_inode_dirty() is unneeded. */ | 893 | /* timestamp is already written, so mark_inode_dirty() is unneeded. */ |
891 | 894 | ||
892 | dentry->d_time = dentry->d_parent->d_inode->i_version; | ||
893 | d_instantiate(dentry, inode); | 895 | d_instantiate(dentry, inode); |
894 | 896 | ||
895 | mutex_unlock(&MSDOS_SB(sb)->s_lock); | 897 | mutex_unlock(&MSDOS_SB(sb)->s_lock); |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e4dc74713a43..1df94fabe4eb 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, | |||
1853 | journal->j_chksum_driver = NULL; | 1853 | journal->j_chksum_driver = NULL; |
1854 | return 0; | 1854 | return 0; |
1855 | } | 1855 | } |
1856 | } | ||
1857 | 1856 | ||
1858 | /* Precompute checksum seed for all metadata */ | 1857 | /* Precompute checksum seed for all metadata */ |
1859 | if (jbd2_journal_has_csum_v2or3(journal)) | ||
1860 | journal->j_csum_seed = jbd2_chksum(journal, ~0, | 1858 | journal->j_csum_seed = jbd2_chksum(journal, ~0, |
1861 | sb->s_uuid, | 1859 | sb->s_uuid, |
1862 | sizeof(sb->s_uuid)); | 1860 | sizeof(sb->s_uuid)); |
1861 | } | ||
1863 | } | 1862 | } |
1864 | 1863 | ||
1865 | /* If enabling v1 checksums, downgrade superblock */ | 1864 | /* If enabling v1 checksums, downgrade superblock */ |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index dc9d2a27c315..09a819ee2151 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -201,8 +201,8 @@ void fib_free_table(struct fib_table *tb); | |||
201 | 201 | ||
202 | #ifndef CONFIG_IP_MULTIPLE_TABLES | 202 | #ifndef CONFIG_IP_MULTIPLE_TABLES |
203 | 203 | ||
204 | #define TABLE_LOCAL_INDEX 0 | 204 | #define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) |
205 | #define TABLE_MAIN_INDEX 1 | 205 | #define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1)) |
206 | 206 | ||
207 | static inline struct fib_table *fib_get_table(struct net *net, u32 id) | 207 | static inline struct fib_table *fib_get_table(struct net *net, u32 id) |
208 | { | 208 | { |
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index a1e8175cc488..5b6a36cb77fb 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -429,7 +429,7 @@ header-y += virtio_net.h | |||
429 | header-y += virtio_pci.h | 429 | header-y += virtio_pci.h |
430 | header-y += virtio_ring.h | 430 | header-y += virtio_ring.h |
431 | header-y += virtio_rng.h | 431 | header-y += virtio_rng.h |
432 | header=y += vm_sockets.h | 432 | header-y += vm_sockets.h |
433 | header-y += vt.h | 433 | header-y += vt.h |
434 | header-y += wait.h | 434 | header-y += wait.h |
435 | header-y += wanrouter.h | 435 | header-y += wanrouter.h |
diff --git a/ipc/sem.c b/ipc/sem.c --- a/ipc/sem.c +++ b/ipc/sem.c | |||
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) | |||
507 | return retval; | 507 | return retval; |
508 | } | 508 | } |
509 | 509 | ||
510 | id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); | ||
511 | if (id < 0) { | ||
512 | ipc_rcu_putref(sma, sem_rcu_free); | ||
513 | return id; | ||
514 | } | ||
515 | ns->used_sems += nsems; | ||
516 | |||
517 | sma->sem_base = (struct sem *) &sma[1]; | 510 | sma->sem_base = (struct sem *) &sma[1]; |
518 | 511 | ||
519 | for (i = 0; i < nsems; i++) { | 512 | for (i = 0; i < nsems; i++) { |
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) | |||
528 | INIT_LIST_HEAD(&sma->list_id); | 521 | INIT_LIST_HEAD(&sma->list_id); |
529 | sma->sem_nsems = nsems; | 522 | sma->sem_nsems = nsems; |
530 | sma->sem_ctime = get_seconds(); | 523 | sma->sem_ctime = get_seconds(); |
524 | |||
525 | id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); | ||
526 | if (id < 0) { | ||
527 | ipc_rcu_putref(sma, sem_rcu_free); | ||
528 | return id; | ||
529 | } | ||
530 | ns->used_sems += nsems; | ||
531 | |||
531 | sem_unlock(sma, -1); | 532 | sem_unlock(sma, -1); |
532 | rcu_read_unlock(); | 533 | rcu_read_unlock(); |
533 | 534 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 24beb9bb4c3e..89e7283015a6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void) | |||
2874 | * or we have been woken up remotely but the IPI has not yet arrived, | 2874 | * or we have been woken up remotely but the IPI has not yet arrived, |
2875 | * we haven't yet exited the RCU idle mode. Do it here manually until | 2875 | * we haven't yet exited the RCU idle mode. Do it here manually until |
2876 | * we find a better solution. | 2876 | * we find a better solution. |
2877 | * | ||
2878 | * NB: There are buggy callers of this function. Ideally we | ||
2879 | * should warn if prev_state != IN_USER, but that will trigger | ||
2880 | * too frequently to make sense yet. | ||
2877 | */ | 2881 | */ |
2878 | user_exit(); | 2882 | enum ctx_state prev_state = exception_enter(); |
2879 | schedule(); | 2883 | schedule(); |
2880 | user_enter(); | 2884 | exception_exit(prev_state); |
2881 | } | 2885 | } |
2882 | #endif | 2886 | #endif |
2883 | 2887 | ||
diff --git a/lib/genalloc.c b/lib/genalloc.c index cce4dd68c40d..2e65d206b01c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, | |||
598 | 598 | ||
599 | return pool; | 599 | return pool; |
600 | } | 600 | } |
601 | EXPORT_SYMBOL(devm_gen_pool_create); | ||
601 | 602 | ||
602 | /** | 603 | /** |
603 | * dev_get_gen_pool - Obtain the gen_pool (if any) for a device | 604 | * dev_get_gen_pool - Obtain the gen_pool (if any) for a device |
diff --git a/lib/show_mem.c b/lib/show_mem.c index 09225796991a..5e256271b47b 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter) | |||
28 | continue; | 28 | continue; |
29 | 29 | ||
30 | total += zone->present_pages; | 30 | total += zone->present_pages; |
31 | reserved = zone->present_pages - zone->managed_pages; | 31 | reserved += zone->present_pages - zone->managed_pages; |
32 | 32 | ||
33 | if (is_highmem_idx(zoneid)) | 33 | if (is_highmem_idx(zoneid)) |
34 | highmem += zone->present_pages; | 34 | highmem += zone->present_pages; |
diff --git a/mm/frontswap.c b/mm/frontswap.c index c30eec536f03..f2a3571c6e22 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c | |||
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page) | |||
244 | the (older) page from frontswap | 244 | the (older) page from frontswap |
245 | */ | 245 | */ |
246 | inc_frontswap_failed_stores(); | 246 | inc_frontswap_failed_stores(); |
247 | if (dup) | 247 | if (dup) { |
248 | __frontswap_clear(sis, offset); | 248 | __frontswap_clear(sis, offset); |
249 | frontswap_ops->invalidate_page(type, offset); | ||
250 | } | ||
249 | } | 251 | } |
250 | if (frontswap_writethrough_enabled) | 252 | if (frontswap_writethrough_enabled) |
251 | /* report failure so swap also writes to swap device */ | 253 | /* report failure so swap also writes to swap device */ |
diff --git a/mm/memory.c b/mm/memory.c index 3e503831e042..d5f2ae9c4a23 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -815,20 +815,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
815 | if (!pte_file(pte)) { | 815 | if (!pte_file(pte)) { |
816 | swp_entry_t entry = pte_to_swp_entry(pte); | 816 | swp_entry_t entry = pte_to_swp_entry(pte); |
817 | 817 | ||
818 | if (swap_duplicate(entry) < 0) | 818 | if (likely(!non_swap_entry(entry))) { |
819 | return entry.val; | 819 | if (swap_duplicate(entry) < 0) |
820 | 820 | return entry.val; | |
821 | /* make sure dst_mm is on swapoff's mmlist. */ | 821 | |
822 | if (unlikely(list_empty(&dst_mm->mmlist))) { | 822 | /* make sure dst_mm is on swapoff's mmlist. */ |
823 | spin_lock(&mmlist_lock); | 823 | if (unlikely(list_empty(&dst_mm->mmlist))) { |
824 | if (list_empty(&dst_mm->mmlist)) | 824 | spin_lock(&mmlist_lock); |
825 | list_add(&dst_mm->mmlist, | 825 | if (list_empty(&dst_mm->mmlist)) |
826 | &src_mm->mmlist); | 826 | list_add(&dst_mm->mmlist, |
827 | spin_unlock(&mmlist_lock); | 827 | &src_mm->mmlist); |
828 | } | 828 | spin_unlock(&mmlist_lock); |
829 | if (likely(!non_swap_entry(entry))) | 829 | } |
830 | rss[MM_SWAPENTS]++; | 830 | rss[MM_SWAPENTS]++; |
831 | else if (is_migration_entry(entry)) { | 831 | } else if (is_migration_entry(entry)) { |
832 | page = migration_entry_to_page(entry); | 832 | page = migration_entry_to_page(entry); |
833 | 833 | ||
834 | if (PageAnon(page)) | 834 | if (PageAnon(page)) |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end); | |||
776 | * shrinking vma had, to cover any anon pages imported. | 776 | * shrinking vma had, to cover any anon pages imported. |
777 | */ | 777 | */ |
778 | if (exporter && exporter->anon_vma && !importer->anon_vma) { | 778 | if (exporter && exporter->anon_vma && !importer->anon_vma) { |
779 | if (anon_vma_clone(importer, exporter)) | 779 | int error; |
780 | return -ENOMEM; | 780 | |
781 | error = anon_vma_clone(importer, exporter); | ||
782 | if (error) | ||
783 | return error; | ||
781 | importer->anon_vma = exporter->anon_vma; | 784 | importer->anon_vma = exporter->anon_vma; |
782 | } | 785 | } |
783 | } | 786 | } |
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2469 | if (err) | 2472 | if (err) |
2470 | goto out_free_vma; | 2473 | goto out_free_vma; |
2471 | 2474 | ||
2472 | if (anon_vma_clone(new, vma)) | 2475 | err = anon_vma_clone(new, vma); |
2476 | if (err) | ||
2473 | goto out_free_mpol; | 2477 | goto out_free_mpol; |
2474 | 2478 | ||
2475 | if (new->vm_file) | 2479 | if (new->vm_file) |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
274 | { | 274 | { |
275 | struct anon_vma_chain *avc; | 275 | struct anon_vma_chain *avc; |
276 | struct anon_vma *anon_vma; | 276 | struct anon_vma *anon_vma; |
277 | int error; | ||
277 | 278 | ||
278 | /* Don't bother if the parent process has no anon_vma here. */ | 279 | /* Don't bother if the parent process has no anon_vma here. */ |
279 | if (!pvma->anon_vma) | 280 | if (!pvma->anon_vma) |
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) | |||
283 | * First, attach the new VMA to the parent VMA's anon_vmas, | 284 | * First, attach the new VMA to the parent VMA's anon_vmas, |
284 | * so rmap can find non-COWed pages in child processes. | 285 | * so rmap can find non-COWed pages in child processes. |
285 | */ | 286 | */ |
286 | if (anon_vma_clone(vma, pvma)) | 287 | error = anon_vma_clone(vma, pvma); |
287 | return -ENOMEM; | 288 | if (error) |
289 | return error; | ||
288 | 290 | ||
289 | /* Then add our own anon_vma. */ | 291 | /* Then add our own anon_vma. */ |
290 | anon_vma = anon_vma_alloc(); | 292 | anon_vma = anon_vma_alloc(); |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, | |||
3076 | void *obj; | 3076 | void *obj; |
3077 | int x; | 3077 | int x; |
3078 | 3078 | ||
3079 | VM_BUG_ON(nodeid > num_online_nodes()); | 3079 | VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); |
3080 | n = get_node(cachep, nodeid); | 3080 | n = get_node(cachep, nodeid); |
3081 | BUG_ON(!n); | 3081 | BUG_ON(!n); |
3082 | 3082 | ||
diff --git a/mm/vmpressure.c b/mm/vmpressure.c index d4042e75f7c7..c5afd573d7da 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c | |||
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work) | |||
165 | unsigned long scanned; | 165 | unsigned long scanned; |
166 | unsigned long reclaimed; | 166 | unsigned long reclaimed; |
167 | 167 | ||
168 | spin_lock(&vmpr->sr_lock); | ||
168 | /* | 169 | /* |
169 | * Several contexts might be calling vmpressure(), so it is | 170 | * Several contexts might be calling vmpressure(), so it is |
170 | * possible that the work was rescheduled again before the old | 171 | * possible that the work was rescheduled again before the old |
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work) | |||
173 | * here. No need for any locks here since we don't care if | 174 | * here. No need for any locks here since we don't care if |
174 | * vmpr->reclaimed is in sync. | 175 | * vmpr->reclaimed is in sync. |
175 | */ | 176 | */ |
176 | if (!vmpr->scanned) | 177 | scanned = vmpr->scanned; |
178 | if (!scanned) { | ||
179 | spin_unlock(&vmpr->sr_lock); | ||
177 | return; | 180 | return; |
181 | } | ||
178 | 182 | ||
179 | spin_lock(&vmpr->sr_lock); | ||
180 | scanned = vmpr->scanned; | ||
181 | reclaimed = vmpr->reclaimed; | 183 | reclaimed = vmpr->reclaimed; |
182 | vmpr->scanned = 0; | 184 | vmpr->scanned = 0; |
183 | vmpr->reclaimed = 0; | 185 | vmpr->reclaimed = 0; |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eaa057f14bcd..1ad61a27ee58 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1523,6 +1523,7 @@ static int do_setlink(const struct sk_buff *skb, | |||
1523 | goto errout; | 1523 | goto errout; |
1524 | } | 1524 | } |
1525 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { | 1525 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { |
1526 | put_net(net); | ||
1526 | err = -EPERM; | 1527 | err = -EPERM; |
1527 | goto errout; | 1528 | goto errout; |
1528 | } | 1529 | } |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index bb5947b0ce2d..51973ddc05a6 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -247,6 +247,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) | |||
247 | err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); | 247 | err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); |
248 | 248 | ||
249 | rcu_read_unlock(); | 249 | rcu_read_unlock(); |
250 | |||
251 | skb_set_inner_mac_header(skb, nhoff + grehlen); | ||
252 | |||
250 | return err; | 253 | return err; |
251 | } | 254 | } |
252 | 255 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 33f5ff068c79..a3f72d7fc06c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -623,6 +623,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
623 | arg.iov[0].iov_base = (unsigned char *)&rep; | 623 | arg.iov[0].iov_base = (unsigned char *)&rep; |
624 | arg.iov[0].iov_len = sizeof(rep.th); | 624 | arg.iov[0].iov_len = sizeof(rep.th); |
625 | 625 | ||
626 | net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); | ||
626 | #ifdef CONFIG_TCP_MD5SIG | 627 | #ifdef CONFIG_TCP_MD5SIG |
627 | hash_location = tcp_parse_md5sig_option(th); | 628 | hash_location = tcp_parse_md5sig_option(th); |
628 | if (!sk && hash_location) { | 629 | if (!sk && hash_location) { |
@@ -633,7 +634,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
633 | * Incoming packet is checked with md5 hash with finding key, | 634 | * Incoming packet is checked with md5 hash with finding key, |
634 | * no RST generated if md5 hash doesn't match. | 635 | * no RST generated if md5 hash doesn't match. |
635 | */ | 636 | */ |
636 | sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev), | 637 | sk1 = __inet_lookup_listener(net, |
637 | &tcp_hashinfo, ip_hdr(skb)->saddr, | 638 | &tcp_hashinfo, ip_hdr(skb)->saddr, |
638 | th->source, ip_hdr(skb)->daddr, | 639 | th->source, ip_hdr(skb)->daddr, |
639 | ntohs(th->source), inet_iif(skb)); | 640 | ntohs(th->source), inet_iif(skb)); |
@@ -681,7 +682,6 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
681 | if (sk) | 682 | if (sk) |
682 | arg.bound_dev_if = sk->sk_bound_dev_if; | 683 | arg.bound_dev_if = sk->sk_bound_dev_if; |
683 | 684 | ||
684 | net = dev_net(skb_dst(skb)->dev); | ||
685 | arg.tos = ip_hdr(skb)->tos; | 685 | arg.tos = ip_hdr(skb)->tos; |
686 | ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, | 686 | ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt, |
687 | ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, | 687 | ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d06af89162f1..5ff87805258e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -787,16 +787,16 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { | |||
787 | .queue_hash_add = inet6_csk_reqsk_queue_hash_add, | 787 | .queue_hash_add = inet6_csk_reqsk_queue_hash_add, |
788 | }; | 788 | }; |
789 | 789 | ||
790 | static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, | 790 | static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq, |
791 | u32 tsval, u32 tsecr, int oif, | 791 | u32 ack, u32 win, u32 tsval, u32 tsecr, |
792 | struct tcp_md5sig_key *key, int rst, u8 tclass, | 792 | int oif, struct tcp_md5sig_key *key, int rst, |
793 | u32 label) | 793 | u8 tclass, u32 label) |
794 | { | 794 | { |
795 | const struct tcphdr *th = tcp_hdr(skb); | 795 | const struct tcphdr *th = tcp_hdr(skb); |
796 | struct tcphdr *t1; | 796 | struct tcphdr *t1; |
797 | struct sk_buff *buff; | 797 | struct sk_buff *buff; |
798 | struct flowi6 fl6; | 798 | struct flowi6 fl6; |
799 | struct net *net = dev_net(skb_dst(skb)->dev); | 799 | struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); |
800 | struct sock *ctl_sk = net->ipv6.tcp_sk; | 800 | struct sock *ctl_sk = net->ipv6.tcp_sk; |
801 | unsigned int tot_len = sizeof(struct tcphdr); | 801 | unsigned int tot_len = sizeof(struct tcphdr); |
802 | struct dst_entry *dst; | 802 | struct dst_entry *dst; |
@@ -946,7 +946,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) | |||
946 | (th->doff << 2); | 946 | (th->doff << 2); |
947 | 947 | ||
948 | oif = sk ? sk->sk_bound_dev_if : 0; | 948 | oif = sk ? sk->sk_bound_dev_if : 0; |
949 | tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); | 949 | tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); |
950 | 950 | ||
951 | #ifdef CONFIG_TCP_MD5SIG | 951 | #ifdef CONFIG_TCP_MD5SIG |
952 | release_sk1: | 952 | release_sk1: |
@@ -957,13 +957,13 @@ release_sk1: | |||
957 | #endif | 957 | #endif |
958 | } | 958 | } |
959 | 959 | ||
960 | static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | 960 | static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq, |
961 | u32 win, u32 tsval, u32 tsecr, int oif, | 961 | u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, |
962 | struct tcp_md5sig_key *key, u8 tclass, | 962 | struct tcp_md5sig_key *key, u8 tclass, |
963 | u32 label) | 963 | u32 label) |
964 | { | 964 | { |
965 | tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass, | 965 | tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, |
966 | label); | 966 | tclass, label); |
967 | } | 967 | } |
968 | 968 | ||
969 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) | 969 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) |
@@ -971,7 +971,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
971 | struct inet_timewait_sock *tw = inet_twsk(sk); | 971 | struct inet_timewait_sock *tw = inet_twsk(sk); |
972 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 972 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
973 | 973 | ||
974 | tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 974 | tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
975 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, | 975 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
976 | tcp_time_stamp + tcptw->tw_ts_offset, | 976 | tcp_time_stamp + tcptw->tw_ts_offset, |
977 | tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), | 977 | tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), |
@@ -986,10 +986,10 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, | |||
986 | /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV | 986 | /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV |
987 | * sk->sk_state == TCP_SYN_RECV -> for Fast Open. | 987 | * sk->sk_state == TCP_SYN_RECV -> for Fast Open. |
988 | */ | 988 | */ |
989 | tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? | 989 | tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? |
990 | tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, | 990 | tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, |
991 | tcp_rsk(req)->rcv_nxt, | 991 | tcp_rsk(req)->rcv_nxt, req->rcv_wnd, |
992 | req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, | 992 | tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, |
993 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), | 993 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), |
994 | 0, 0); | 994 | 0, 0); |
995 | } | 995 | } |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index df3c7f20fcee..9645a21d9eaa 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -140,7 +140,7 @@ static bool match_validate(const struct sw_flow_match *match, | |||
140 | if (match->key->eth.type == htons(ETH_P_ARP) | 140 | if (match->key->eth.type == htons(ETH_P_ARP) |
141 | || match->key->eth.type == htons(ETH_P_RARP)) { | 141 | || match->key->eth.type == htons(ETH_P_RARP)) { |
142 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | 142 | key_expected |= 1 << OVS_KEY_ATTR_ARP; |
143 | if (match->mask && (match->mask->key.tp.src == htons(0xff))) | 143 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) |
144 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | 144 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; |
145 | } | 145 | } |
146 | 146 | ||
@@ -221,7 +221,7 @@ static bool match_validate(const struct sw_flow_match *match, | |||
221 | htons(NDISC_NEIGHBOUR_SOLICITATION) || | 221 | htons(NDISC_NEIGHBOUR_SOLICITATION) || |
222 | match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | 222 | match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { |
223 | key_expected |= 1 << OVS_KEY_ATTR_ND; | 223 | key_expected |= 1 << OVS_KEY_ATTR_ND; |
224 | if (match->mask && (match->mask->key.tp.src == htons(0xffff))) | 224 | if (match->mask && (match->mask->key.tp.src == htons(0xff))) |
225 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; | 225 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; |
226 | } | 226 | } |
227 | } | 227 | } |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 88618f8b794c..c54c9d9d1ffb 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -22,8 +22,9 @@ menuconfig NET_SCHED | |||
22 | This code is considered to be experimental. | 22 | This code is considered to be experimental. |
23 | 23 | ||
24 | To administer these schedulers, you'll need the user-level utilities | 24 | To administer these schedulers, you'll need the user-level utilities |
25 | from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>. | 25 | from the package iproute2+tc at |
26 | That package also contains some documentation; for more, check out | 26 | <https://www.kernel.org/pub/linux/utils/net/iproute2/>. That package |
27 | also contains some documentation; for more, check out | ||
27 | <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>. | 28 | <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>. |
28 | 29 | ||
29 | This Quality of Service (QoS) support will enable you to use | 30 | This Quality of Service (QoS) support will enable you to use |
@@ -336,7 +337,7 @@ config NET_SCH_PLUG | |||
336 | of virtual machines by allowing the generated network output to be rolled | 337 | of virtual machines by allowing the generated network output to be rolled |
337 | back if needed. | 338 | back if needed. |
338 | 339 | ||
339 | For more information, please refer to http://wiki.xensource.com/xenwiki/Remus | 340 | For more information, please refer to <http://wiki.xenproject.org/wiki/Remus> |
340 | 341 | ||
341 | Say Y here if you are using this kernel for Xen dom0 and | 342 | Say Y here if you are using this kernel for Xen dom0 and |
342 | want to protect Xen guests with Remus. | 343 | want to protect Xen guests with Remus. |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index b9ca32ebc1de..1e52decb7b59 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -94,7 +94,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, | |||
94 | TC_H_MIN(skb->priority) <= q->flows_cnt) | 94 | TC_H_MIN(skb->priority) <= q->flows_cnt) |
95 | return TC_H_MIN(skb->priority); | 95 | return TC_H_MIN(skb->priority); |
96 | 96 | ||
97 | filter = rcu_dereference(q->filter_list); | 97 | filter = rcu_dereference_bh(q->filter_list); |
98 | if (!filter) | 98 | if (!filter) |
99 | return fq_codel_hash(q, skb) + 1; | 99 | return fq_codel_hash(q, skb) + 1; |
100 | 100 | ||
diff --git a/net/sctp/output.c b/net/sctp/output.c index 42dffd428389..fc5e45b8a832 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -401,12 +401,12 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
401 | sk = chunk->skb->sk; | 401 | sk = chunk->skb->sk; |
402 | 402 | ||
403 | /* Allocate the new skb. */ | 403 | /* Allocate the new skb. */ |
404 | nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); | 404 | nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); |
405 | if (!nskb) | 405 | if (!nskb) |
406 | goto nomem; | 406 | goto nomem; |
407 | 407 | ||
408 | /* Make sure the outbound skb has enough header room reserved. */ | 408 | /* Make sure the outbound skb has enough header room reserved. */ |
409 | skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); | 409 | skb_reserve(nskb, packet->overhead + MAX_HEADER); |
410 | 410 | ||
411 | /* Set the owning socket so that we know where to get the | 411 | /* Set the owning socket so that we know where to get the |
412 | * destination IP address. | 412 | * destination IP address. |
diff --git a/security/keys/internal.h b/security/keys/internal.h index b8960c4959a5..200e37867336 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
@@ -117,6 +117,7 @@ struct keyring_search_context { | |||
117 | #define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */ | 117 | #define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */ |
118 | #define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */ | 118 | #define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */ |
119 | #define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */ | 119 | #define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */ |
120 | #define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */ | ||
120 | 121 | ||
121 | int (*iterator)(const void *object, void *iterator_data); | 122 | int (*iterator)(const void *object, void *iterator_data); |
122 | 123 | ||
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index eff88a5f5d40..4743d71e4aa6 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define KEY_MAX_DESC_SIZE 4096 | ||
30 | |||
29 | static int key_get_type_from_user(char *type, | 31 | static int key_get_type_from_user(char *type, |
30 | const char __user *_type, | 32 | const char __user *_type, |
31 | unsigned len) | 33 | unsigned len) |
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, | |||
78 | 80 | ||
79 | description = NULL; | 81 | description = NULL; |
80 | if (_description) { | 82 | if (_description) { |
81 | description = strndup_user(_description, PAGE_SIZE); | 83 | description = strndup_user(_description, KEY_MAX_DESC_SIZE); |
82 | if (IS_ERR(description)) { | 84 | if (IS_ERR(description)) { |
83 | ret = PTR_ERR(description); | 85 | ret = PTR_ERR(description); |
84 | goto error; | 86 | goto error; |
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type, | |||
177 | goto error; | 179 | goto error; |
178 | 180 | ||
179 | /* pull the description into kernel space */ | 181 | /* pull the description into kernel space */ |
180 | description = strndup_user(_description, PAGE_SIZE); | 182 | description = strndup_user(_description, KEY_MAX_DESC_SIZE); |
181 | if (IS_ERR(description)) { | 183 | if (IS_ERR(description)) { |
182 | ret = PTR_ERR(description); | 184 | ret = PTR_ERR(description); |
183 | goto error; | 185 | goto error; |
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name) | |||
287 | /* fetch the name from userspace */ | 289 | /* fetch the name from userspace */ |
288 | name = NULL; | 290 | name = NULL; |
289 | if (_name) { | 291 | if (_name) { |
290 | name = strndup_user(_name, PAGE_SIZE); | 292 | name = strndup_user(_name, KEY_MAX_DESC_SIZE); |
291 | if (IS_ERR(name)) { | 293 | if (IS_ERR(name)) { |
292 | ret = PTR_ERR(name); | 294 | ret = PTR_ERR(name); |
293 | goto error; | 295 | goto error; |
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid, | |||
562 | { | 564 | { |
563 | struct key *key, *instkey; | 565 | struct key *key, *instkey; |
564 | key_ref_t key_ref; | 566 | key_ref_t key_ref; |
565 | char *tmpbuf; | 567 | char *infobuf; |
566 | long ret; | 568 | long ret; |
569 | int desclen, infolen; | ||
567 | 570 | ||
568 | key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); | 571 | key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); |
569 | if (IS_ERR(key_ref)) { | 572 | if (IS_ERR(key_ref)) { |
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid, | |||
586 | } | 589 | } |
587 | 590 | ||
588 | okay: | 591 | okay: |
589 | /* calculate how much description we're going to return */ | ||
590 | ret = -ENOMEM; | ||
591 | tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
592 | if (!tmpbuf) | ||
593 | goto error2; | ||
594 | |||
595 | key = key_ref_to_ptr(key_ref); | 592 | key = key_ref_to_ptr(key_ref); |
593 | desclen = strlen(key->description); | ||
596 | 594 | ||
597 | ret = snprintf(tmpbuf, PAGE_SIZE - 1, | 595 | /* calculate how much information we're going to return */ |
598 | "%s;%d;%d;%08x;%s", | 596 | ret = -ENOMEM; |
599 | key->type->name, | 597 | infobuf = kasprintf(GFP_KERNEL, |
600 | from_kuid_munged(current_user_ns(), key->uid), | 598 | "%s;%d;%d;%08x;", |
601 | from_kgid_munged(current_user_ns(), key->gid), | 599 | key->type->name, |
602 | key->perm, | 600 | from_kuid_munged(current_user_ns(), key->uid), |
603 | key->description ?: ""); | 601 | from_kgid_munged(current_user_ns(), key->gid), |
604 | 602 | key->perm); | |
605 | /* include a NUL char at the end of the data */ | 603 | if (!infobuf) |
606 | if (ret > PAGE_SIZE - 1) | 604 | goto error2; |
607 | ret = PAGE_SIZE - 1; | 605 | infolen = strlen(infobuf); |
608 | tmpbuf[ret] = 0; | 606 | ret = infolen + desclen + 1; |
609 | ret++; | ||
610 | 607 | ||
611 | /* consider returning the data */ | 608 | /* consider returning the data */ |
612 | if (buffer && buflen > 0) { | 609 | if (buffer && buflen >= ret) { |
613 | if (buflen > ret) | 610 | if (copy_to_user(buffer, infobuf, infolen) != 0 || |
614 | buflen = ret; | 611 | copy_to_user(buffer + infolen, key->description, |
615 | 612 | desclen + 1) != 0) | |
616 | if (copy_to_user(buffer, tmpbuf, buflen) != 0) | ||
617 | ret = -EFAULT; | 613 | ret = -EFAULT; |
618 | } | 614 | } |
619 | 615 | ||
620 | kfree(tmpbuf); | 616 | kfree(infobuf); |
621 | error2: | 617 | error2: |
622 | key_ref_put(key_ref); | 618 | key_ref_put(key_ref); |
623 | error: | 619 | error: |
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid, | |||
649 | if (ret < 0) | 645 | if (ret < 0) |
650 | goto error; | 646 | goto error; |
651 | 647 | ||
652 | description = strndup_user(_description, PAGE_SIZE); | 648 | description = strndup_user(_description, KEY_MAX_DESC_SIZE); |
653 | if (IS_ERR(description)) { | 649 | if (IS_ERR(description)) { |
654 | ret = PTR_ERR(description); | 650 | ret = PTR_ERR(description); |
655 | goto error; | 651 | goto error; |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 8177010174f7..e72548b5897e 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | if (key->expiry && ctx->now.tv_sec >= key->expiry) { | 548 | if (key->expiry && ctx->now.tv_sec >= key->expiry) { |
549 | ctx->result = ERR_PTR(-EKEYEXPIRED); | 549 | if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) |
550 | ctx->result = ERR_PTR(-EKEYEXPIRED); | ||
550 | kleave(" = %d [expire]", ctx->skipped_ret); | 551 | kleave(" = %d [expire]", ctx->skipped_ret); |
551 | goto skipped; | 552 | goto skipped; |
552 | } | 553 | } |
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring, | |||
628 | ctx->index_key.type->name, | 629 | ctx->index_key.type->name, |
629 | ctx->index_key.description); | 630 | ctx->index_key.description); |
630 | 631 | ||
632 | #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) | ||
633 | BUG_ON((ctx->flags & STATE_CHECKS) == 0 || | ||
634 | (ctx->flags & STATE_CHECKS) == STATE_CHECKS); | ||
635 | |||
631 | if (ctx->index_key.description) | 636 | if (ctx->index_key.description) |
632 | ctx->index_key.desc_len = strlen(ctx->index_key.description); | 637 | ctx->index_key.desc_len = strlen(ctx->index_key.description); |
633 | 638 | ||
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring, | |||
637 | if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || | 642 | if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || |
638 | keyring_compare_object(keyring, &ctx->index_key)) { | 643 | keyring_compare_object(keyring, &ctx->index_key)) { |
639 | ctx->skipped_ret = 2; | 644 | ctx->skipped_ret = 2; |
640 | ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK; | ||
641 | switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { | 645 | switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { |
642 | case 1: | 646 | case 1: |
643 | goto found; | 647 | goto found; |
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring, | |||
649 | } | 653 | } |
650 | 654 | ||
651 | ctx->skipped_ret = 0; | 655 | ctx->skipped_ret = 0; |
652 | if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) | ||
653 | ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK; | ||
654 | 656 | ||
655 | /* Start processing a new keyring */ | 657 | /* Start processing a new keyring */ |
656 | descend_to_keyring: | 658 | descend_to_keyring: |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index bb4337c7ae1b..0c7aea4dea54 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type, | |||
516 | .match_data.cmp = key_default_cmp, | 516 | .match_data.cmp = key_default_cmp, |
517 | .match_data.raw_data = description, | 517 | .match_data.raw_data = description, |
518 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, | 518 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, |
519 | .flags = (KEYRING_SEARCH_DO_STATE_CHECK | | ||
520 | KEYRING_SEARCH_SKIP_EXPIRED), | ||
519 | }; | 521 | }; |
520 | struct key *key; | 522 | struct key *key; |
521 | key_ref_t key_ref; | 523 | key_ref_t key_ref; |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 6639e2cb8853..5d672f7580dd 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) | |||
249 | .match_data.cmp = key_default_cmp, | 249 | .match_data.cmp = key_default_cmp, |
250 | .match_data.raw_data = description, | 250 | .match_data.raw_data = description, |
251 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, | 251 | .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, |
252 | .flags = KEYRING_SEARCH_DO_STATE_CHECK, | ||
252 | }; | 253 | }; |
253 | struct key *authkey; | 254 | struct key *authkey; |
254 | key_ref_t authkey_ref; | 255 | key_ref_t authkey_ref; |