-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  arch/x86/include/asm/gart.h | 24
-rw-r--r--  arch/x86/kernel/aperture_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 22
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 9
-rw-r--r--  block/blk-core.c | 13
-rw-r--r--  block/blk-sysfs.c | 8
-rw-r--r--  block/blk.h | 1
-rw-r--r--  block/cfq-iosched.c | 20
-rw-r--r--  drivers/connector/connector.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 89
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 6
-rw-r--r--  drivers/media/video/videobuf-dma-contig.c | 2
-rw-r--r--  drivers/net/bna/bfa_ioc.c | 31
-rw-r--r--  drivers/net/bna/bfa_ioc.h | 1
-rw-r--r--  drivers/net/bna/bfa_ioc_ct.c | 28
-rw-r--r--  drivers/net/bna/bfi.h | 6
-rw-r--r--  drivers/net/bna/bnad.c | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 9
-rw-r--r--  drivers/net/bonding/bond_alb.c | 6
-rw-r--r--  drivers/net/bonding/bond_alb.h | 4
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r--  drivers/net/loopback.c | 3
-rw-r--r--  drivers/net/natsemi.c | 3
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 17
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 1
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 14
-rw-r--r--  drivers/net/sfc/efx.c | 6
-rw-r--r--  drivers/net/sfc/io.h | 2
-rw-r--r--  drivers/net/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/sfc/nic.c | 22
-rw-r--r--  drivers/net/sfc/nic.h | 1
-rw-r--r--  drivers/net/sfc/selftest.c | 25
-rw-r--r--  drivers/net/sfc/tx.c | 3
-rw-r--r--  drivers/net/sis900.c | 23
-rw-r--r--  drivers/net/stmmac/dwmac_lib.c | 28
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 49
-rw-r--r--  drivers/net/tokenring/3c359.c | 4
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 2
-rw-r--r--  drivers/net/tokenring/olympic.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 6
-rw-r--r--  drivers/net/wireless/ath/regd_common.h | 1
-rw-r--r--  drivers/net/wireless/iwlegacy/Kconfig | 9
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-hw.h | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-4965-hw.h | 3
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.c | 17
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-eeprom.c | 7
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl3945-base.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl4965-base.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 3
-rw-r--r--  drivers/net/wireless/mwl8k.c | 9
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/pci/Kconfig | 4
-rw-r--r--  drivers/pci/Makefile | 4
-rw-r--r--  drivers/rtc/rtc-omap.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 17
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 19
-rw-r--r--  fs/gfs2/aops.c | 2
-rw-r--r--  fs/gfs2/dir.c | 2
-rw-r--r--  fs/gfs2/file.c | 58
-rw-r--r--  fs/gfs2/glops.c | 4
-rw-r--r--  fs/gfs2/inode.c | 56
-rw-r--r--  fs/gfs2/inode.h | 3
-rw-r--r--  fs/gfs2/ops_fstype.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 4
-rw-r--r--  fs/gfs2/super.c | 14
-rw-r--r--  include/linux/blkdev.h | 26
-rw-r--r--  include/linux/posix-clock.h | 5
-rw-r--r--  include/linux/usb/usbnet.h | 4
-rw-r--r--  kernel/time/posix-clock.c | 24
-rw-r--r--  net/bridge/br_netfilter.c | 6
-rw-r--r--  net/caif/cfdgml.c | 6
-rw-r--r--  net/caif/cfmuxl.c | 4
-rw-r--r--  net/core/dev.c | 10
-rw-r--r--  net/ieee802154/Makefile | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 5
-rw-r--r--  net/ipv4/inetpeer.c | 13
-rw-r--r--  net/ipv4/ip_options.c | 6
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 3
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/irda/af_irda.c | 3
-rw-r--r--  net/llc/llc_input.c | 3
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 18
-rw-r--r--  net/netfilter/xt_set.c | 18
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  tools/perf/builtin-record.c | 7
-rw-r--r--  tools/perf/builtin-stat.c | 7
-rw-r--r--  tools/perf/builtin-test.c | 10
-rw-r--r--  tools/perf/builtin-top.c | 3
-rw-r--r--  tools/perf/util/evlist.c | 14
-rw-r--r--  tools/perf/util/evsel.c | 27
-rw-r--r--  tools/perf/util/evsel.h | 6
-rw-r--r--  tools/perf/util/python.c | 9
-rw-r--r--  tools/perf/util/ui/browsers/annotate.c | 6
-rw-r--r--  tools/perf/util/ui/browsers/hists.c | 2
116 files changed, 678 insertions, 487 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ec3600306289..1e2724e55cf0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -151,6 +151,7 @@ S: Maintained
 F:	drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 43085bfc99c3..156cd5d18d2a 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
	 * Don't enable translation but enable GART IO and CPU accesses.
	 * Also, set DISTLBWALKPRB since GART tables memory is UC.
	 */
-	ctl = DISTLBWALKPRB | order << 1;
+	ctl = order << 1;
 
	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
 {
	u32 tmp, ctl;
 
	/* address of the mappings table */
	addr >>= 12;
	tmp = (u32) addr<<4;
	tmp &= ~0xf;
	pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
 
	/* Enable GART translation for this hammer. */
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
-	ctl |= GARTEN;
+	ctl |= GARTEN | DISTLBWALKPRB;
	ctl &= ~(DISGARTCPU | DISGARTIO);
	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
 }
 
 static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 86d1ad4962a7..73fb469908c6 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -499,7 +499,7 @@ out:
		 * Don't enable translation yet but enable GART IO and CPU
		 * accesses and set DISTLBWALKPRB since GART table memory is UC.
		 */
-		u32 ctl = DISTLBWALKPRB | aper_order << 1;
+		u32 ctl = aper_order << 1;
 
		bus = amd_nb_bus_dev_ranges[i].bus;
		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 461f62bbd774..cf4e369cea67 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
  [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
-		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses   */
+		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
  *
  * Exceptions:
  *
+ * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x003	FP	PERF_CTL[3]
+ * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
  * 0x00B	FP	PERF_CTL[3]
  * 0x00D	FP	PERF_CTL[3]
  * 0x023	DE	PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
  * 0x0DF	LS	PERF_CTL[5:0]
  * 0x1D6	EX	PERF_CTL[5:0]
  * 0x1D8	EX	PERF_CTL[5:0]
+ *
+ * (*) depending on the umask all FPU counters may be used
  */
 
 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-	unsigned int event_code = amd_get_event_code(&event->hw);
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int event_code = amd_get_event_code(hwc);
 
	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
+		case 0x000:
+			if (!(hwc->config & 0x0000F000ULL))
+				break;
+			if (!(hwc->config & 0x00000F00ULL))
+				break;
+			return &amd_f15_PMC3;
+		case 0x004:
+			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
+				break;
+			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
-		default:
-			return &amd_f15_PMC53;
		}
+		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 82ada01625b9..b117efd24f71 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
 #define AGPEXTERN
 #endif
 
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR	(1ULL << 40)
+
 /* backdoor interface to AGP driver */
 AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			  size_t size, int dir, unsigned long align_mask)
 {
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
-	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+	unsigned long iommu_page;
	int i;
 
+	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+		return bad_dma_addr;
+
+	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
diff --git a/block/blk-core.c b/block/blk-core.c
index 5fa3dd2705c6..a2e58eeb3549 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
- * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
	if (unlikely(blk_queue_stopped(q)))
		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
	if (likely(!blk_queue_stopped(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6d735122bc59..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}
 
	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
		return ret;
 
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
		return ret;
+	}
 
	kobject_uevent(&q->kobj, KOBJ_ADD);
 
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..61263463e38e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 46b0a1d1d925..5b52011e3a40 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
-		    void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
	struct cfq_io_context *cic;
	struct hlist_node *n;
 
+	rcu_read_lock();
+
	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
 }
 
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d77005849af8..219d88a0eeae 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
+		err = 0;
	}
 
	return err;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index ce38e97b9428..568caedd7216 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
		return ret;
 
	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
				     &chan->m2mf_ntfy);
	if (ret)
		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 856d56a98d1e..a76514a209b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
	/* For PFIFO and PGRAPH. */
	spinlock_t context_switch_lock;
 
+	/* VM/PRAMIN flush, legacy PRAMIN aperture */
+	spinlock_t vm_lock;
+
	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
	struct nouveau_ramht *ramht;
	struct nouveau_gpuobj *ramfc;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 889c4454682e..39aee6d4daf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
		OUT_RING (chan, 0);
	}
 
-	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
	FIRE_RING(chan);
	mutex_unlock(&chan->mutex);
 
	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
-		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
			ret = 0;
			break;
		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 78f467fe30be..5045f8b921d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
		dma_bits = 40;
	} else
	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 &&
+	    dev_priv->chipset > 0x40 &&
	    dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 7ba3fc0b30c1..5b39718ae1f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 {
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *ntfy = NULL;
-	uint32_t flags;
+	uint32_t flags, ttmpl;
	int ret;
 
-	if (nouveau_vram_notify)
+	if (nouveau_vram_notify) {
		flags = NOUVEAU_GEM_DOMAIN_VRAM;
-	else
+		ttmpl = TTM_PL_FLAG_VRAM;
+	} else {
		flags = NOUVEAU_GEM_DOMAIN_GART;
+		ttmpl = TTM_PL_FLAG_TT;
+	}
 
	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
	if (ret)
		return ret;
 
-	ret = nouveau_bo_pin(ntfy, flags);
+	ret = nouveau_bo_pin(ntfy, ttmpl);
	if (ret)
		goto out_err;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 4f00c87ed86e..67a16e01ffa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
 {
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32 val;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}
 
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
 {
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
+	unsigned long flags;
 
	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64 ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
 
-		spin_lock(&dev_priv->ramin_lock);
+		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
-		spin_unlock(&dev_priv->ramin_lock);
+		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a33fe4019286..4bce801bc588 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
				be->func->clear(be);
				return -EFAULT;
			}
+			nvbe->ttm_alloced[nvbe->nr_pages] = false;
		}
 
		nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
	u32 aper_size, align;
	int ret;
 
-	if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+	if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (drm_pci_device_is_pcie(dev) &&
-	    dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 6e2b1a6caa2d..a30adec5beaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
	spin_lock_init(&dev_priv->channels.lock);
	spin_lock_init(&dev_priv->tile.lock);
	spin_lock_init(&dev_priv->context_switch_lock);
+	spin_lock_init(&dev_priv->vm_lock);
 
	/* Make the CRTCs and I2C buses accessible */
	ret = engine->display.early_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a6f8aa651fc6..4f95a1e5822e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -404,23 +404,25 @@ void
 nv50_instmem_flush(struct drm_device *dev)
 {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x00330c, 0x00000001);
	if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
 void
 nv84_instmem_flush(struct drm_device *dev)
 {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x070000, 0x00000001);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 4fd3432b5b8d..6c2694490741 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -174,10 +174,11 @@ void
 nv50_vm_flush_engine(struct drm_device *dev, int engine)
 {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a0a2a0277f73..a179e6c55afb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm)
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct drm_device *dev = vm->dev;
	struct nouveau_vm_pgd *vpgd;
+	unsigned long flags;
	u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
 
	pinstmem->flush(vm->dev);
 
-	spin_lock(&dev_priv->ramin_lock);
+	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc to more it decreases
@@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm)
				 nv_rd32(dev, 0x100c80), engine);
		}
	}
-	spin_unlock(&dev_priv->ramin_lock);
+	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d71d375149f8..7bd745689097 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
		case ATOM_IIO_MOVE_INDEX:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
		case ATOM_IIO_MOVE_DATA:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
		case ATOM_IIO_MOVE_ATTR:
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
-			      CU8(base + 2));
+			      CU8(base + 3));
			temp |=
			    ((ctx->
			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9d516a8c4dfa..529a3a704731 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
		else
			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-		if ((rdev->family == CHIP_R600) ||
-		    (rdev->family == CHIP_RV610) ||
-		    (rdev->family == CHIP_RV630) ||
-		    (rdev->family == CHIP_RV670))
+		if (rdev->family < CHIP_RV770)
			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
	} else {
		pll->flags |= RADEON_PLL_LEGACY;
@@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
		if (ss_enabled) {
			if (ss->refdiv) {
-				pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
				pll->flags |= RADEON_PLL_USE_REF_DIV;
				pll->reference_div = ss->refdiv;
				if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3453910ee0f3..43fd01674489 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -353,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
 {
-	u32 tmp = 0;
+	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -363,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
-	 *  2 - whole lb (7680 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
-	 *  6 - whole lb (7680 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
-	if (mode && other_mode) {
-		if (mode->hdisplay > other_mode->hdisplay) {
-			if (mode->hdisplay > 2560)
-				tmp = 1; /* 3/4 */
-			else
-				tmp = 0; /* 1/2 */
-		} else if (other_mode->hdisplay > mode->hdisplay) {
-			if (other_mode->hdisplay > 2560)
-				tmp = 3; /* 1/4 */
-			else
-				tmp = 0; /* 1/2 */
-		} else
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode)
			tmp = 0; /* 1/2 */
-	} else if (mode)
-		tmp = 2; /* whole */
-	else if (other_mode)
-		tmp = 3; /* 1/4 */
+		else
+			tmp = 2; /* whole */
+	} else
+		tmp = 0;
 
	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
-	switch (tmp) {
-	case 0:
-	case 4:
-	default:
-		if (ASIC_IS_DCE5(rdev))
-			return 4096 * 2;
-		else
-			return 3840 * 2;
-	case 1:
-	case 5:
-		if (ASIC_IS_DCE5(rdev))
-			return 6144 * 2;
-		else
-			return 5760 * 2;
-	case 2:
-	case 6:
-		if (ASIC_IS_DCE5(rdev))
-			return 8192 * 2;
-		else
-			return 7680 * 2;
-	case 3:
-	case 7:
-		if (ASIC_IS_DCE5(rdev))
-			return 2048 * 2;
-		else
-			return 1920 * 2;
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
 }
 
 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2ef6d5135064..5f45fa12bb8b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
	if (router->ddc_valid || router->cd_valid) {
		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
		if (!radeon_connector->router_bus)
-			goto failed;
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
	}
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		radeon_connector->dac_load_detect = true;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		radeon_connector->dac_load_detect = true;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		subpixel_order = SubPixelHorizontalRGB;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		drm_connector_attach_property(&radeon_connector->base,
					      rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
			else
				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
			if (!radeon_dig_connector->dp_i2c_bus)
-				goto failed;
+				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		subpixel_order = SubPixelHorizontalRGB;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		drm_connector_attach_property(&radeon_connector->base,
					      dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		radeon_connector->dac_load_detect = true;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		radeon_connector->dac_load_detect = true;
		drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
			radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
		if (i2c_bus->valid) {
			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
			if (!radeon_connector->ddc_bus)
-				goto failed;
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
		}
		drm_connector_attach_property(&radeon_connector->base,
					      dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
			radeon_legacy_backlight_init(radeon_encoder, connector);
		}
	}
-	return;
-
-failed:
-	drm_connector_cleanup(connector);
-	kfree(connector);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ccbabf734a61..983cbac75af0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
	if (!radeon_connector->router.ddc_valid)
		return;
 
+	if (!radeon_connector->router_bus)
+		return;
+
	radeon_i2c_get_byte(radeon_connector->router_bus,
			    radeon_connector->router.i2c_addr,
			    0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
	if (!radeon_connector->router.cd_valid)
		return;
 
+	if (!radeon_connector->router_bus)
+		return;
+
	radeon_i2c_get_byte(radeon_connector->router_bus,
			    radeon_connector->router.i2c_addr,
			    0x3, &val);
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index c4742fc15529..c9691115f2d2 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
-				 PFN_DOWN(virt_to_phys(mem->vaddr)),
+				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e3de0b8625cd..7581518ecfa2 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -38,6 +38,8 @@
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
 #define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc)		\
+			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
 #define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
 #define bfa_ioc_sync_leave(__ioc)		\
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
-			if (bfa_ioc_sync_complete(ioc)) {
+			if (bfa_ioc_sync_start(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  * execution context (driver/bios) must match.
  */
 static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
 {
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
	if (fwhdr.signature != drv_fwhdr->signature)
		return false;
 
-	if (fwhdr.exec != drv_fwhdr->exec)
+	if (swab32(fwhdr.param) != boot_env)
		return false;
 
	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
 {
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
+	u32 boot_env;
 
	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
+	boot_env = BFI_BOOT_LOADER_OS;
+
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;
 
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-		false : bfa_ioc_fwver_valid(ioc);
+		false : bfa_ioc_fwver_valid(ioc, boot_env);
 
	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
		return;
	}
 
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
	/**
	 * Initialize the h/w for any other states.
	 */
-	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
 }
 
 void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
-		    u32 boot_param)
+		    u32 boot_env)
 {
	u32 *fwimg;
	u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
	/*
	 * Set boot type and boot param at the end.
	 */
-	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_TYPE_OFF)));
-	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
-			+ (BFI_BOOT_PARAM_OFF)));
+	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+			+ (BFI_BOOT_LOADER_OFF)));
 }
 
 static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
  * as the entry vector.
  */
 static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
 {
	void __iomem *rb;
 
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
-	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
	}
 
	bfa_ioc_msgflush(ioc);
-	bfa_ioc_download_fw(ioc, boot_type, boot_param);
+	bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
	/**
	 * Enable interrupts just before starting LPU
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index e4974bc24ef6..bd48abee781f 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
					bool msix);
	void		(*ioc_notify_fail)	(struct bfa_ioc *ioc);
	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
+	bool		(*ioc_sync_start)	(struct bfa_ioc *ioc);
	void		(*ioc_sync_join)	(struct bfa_ioc *ioc);
	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 469997c4ffd1..87aecdf22cf9 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
44static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); 45static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); 46static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); 47static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
63 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 64 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
64 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 65 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
65 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 66 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
67 nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
66 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 68 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
67 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 69 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
68 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; 70 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
345/** 347/**
346 * Synchronized IOC failure processing routines 348 * Synchronized IOC failure processing routines
347 */ 349 */
350static bool
351bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
352{
353 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
354 u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
355
356 /*
357 * Driver load time. If the sync required bit for this PCI fn
358 * is set, it is due to an unclean exit by the driver for this
359 * PCI fn in the previous incarnation. Whoever comes here first
360 * should clean it up, no matter which PCI fn.
361 */
362
363 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
364 writel(0, ioc->ioc_regs.ioc_fail_sync);
365 writel(1, ioc->ioc_regs.ioc_usage_reg);
366 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
367 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
368 return true;
369 }
370
371 return bfa_ioc_ct_sync_complete(ioc);
372}
373/**
374 * Synchronized IOC failure processing routines
375 */
348static void 376static void
349bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 377bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
350{ 378{
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index a97396811050..6050379526f7 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -184,12 +184,14 @@ enum bfi_mclass {
184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ 184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
185 185
186#define BFI_BOOT_TYPE_OFF 8 186#define BFI_BOOT_TYPE_OFF 8
187#define BFI_BOOT_PARAM_OFF 12 187#define BFI_BOOT_LOADER_OFF 12
188 188
189#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */ 189#define BFI_BOOT_TYPE_NORMAL 0
190#define BFI_BOOT_TYPE_FLASH 1 190#define BFI_BOOT_TYPE_FLASH 1
191#define BFI_BOOT_TYPE_MEMTEST 2 191#define BFI_BOOT_TYPE_MEMTEST 2
192 192
193#define BFI_BOOT_LOADER_OS 0
194
193#define BFI_BOOT_MEMTEST_RES_ADDR 0x900 195#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
194#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 196#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
195 197
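
The rename above (BFI_BOOT_PARAM_OFF to BFI_BOOT_LOADER_OFF, plus the new BFI_BOOT_LOADER_OS value) pairs with the bfa_ioc.c hunk earlier that drops the double byte swap: swab32(swab32(x)) is the identity, so writing the value with a plain writel() is behaviour-preserving. A minimal sketch of how the driver publishes the two boot words into adapter shared memory, using the offsets defined above (publish_boot_words() is a hypothetical helper, not a driver function):

static void publish_boot_words(void __iomem *smem, u32 boot_type, u32 boot_env)
{
	/* word at offset 8: boot type (NORMAL/FLASH/MEMTEST) */
	writel(boot_type, smem + BFI_BOOT_TYPE_OFF);
	/* word at offset 12: boot environment, e.g. BFI_BOOT_LOADER_OS */
	writel(boot_env, smem + BFI_BOOT_LOADER_OFF);
}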
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 9f356d5d0f33..8e6ceab9f4d8 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
1837 /* Initialize the Rx event handlers */ 1837 /* Initialize the Rx event handlers */
1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; 1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; 1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1840 rx_cbfn.rcb_destroy_cbfn = NULL;
1841 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; 1840 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1842 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; 1841 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1843 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup; 1842 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b5..89cb977898cb 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
2114 for (i = 0; i < (data * 2); i++) { 2114 for (i = 0; i < (data * 2); i++) {
2115 if ((i % 2) == 0) 2115 if ((i % 2) == 0)
2116 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2116 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2117 LED_MODE_OPER, SPEED_1000); 2117 LED_MODE_ON, SPEED_1000);
2118 else 2118 else
2119 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2119 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2120 LED_MODE_OFF, 0); 2120 LED_MODE_FRONT_PANEL_OFF, 0);
2121 2121
2122 msleep_interruptible(500); 2122 msleep_interruptible(500);
2123 if (signal_pending(current)) 2123 if (signal_pending(current))
2124 break; 2124 break;
2125 } 2125 }
2126 2126
2127 if (bp->link_vars.link_up) 2127 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2128 bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, 2128 LED_MODE_OPER, bp->link_vars.line_speed);
2129 bp->link_vars.line_speed);
2130 2129
2131 return 0; 2130 return 0;
2132} 2131}
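
The LED hunk above does two things: it blinks with LED_MODE_ON/LED_MODE_FRONT_PANEL_OFF instead of the operational modes, and it restores LED_MODE_OPER unconditionally rather than only when the link is up. A hedged sketch of the resulting blink-then-restore pattern; set_led() stands in for the four-argument bnx2x_set_led() call and is not a real driver symbol:

static void blink_id_led(struct bnx2x *bp, u32 seconds)
{
	u32 i;

	for (i = 0; i < seconds * 2; i++) {
		/* alternate ON and front-panel-OFF every 500 ms */
		set_led(bp, (i % 2) ? LED_MODE_FRONT_PANEL_OFF : LED_MODE_ON);
		msleep_interruptible(500);
		if (signal_pending(current))
			break;		/* user aborted the identify blink */
	}

	/* always hand the LED back to link-state control */
	set_led(bp, LED_MODE_OPER);
}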
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9bc5de3e04a8..ba715826e2a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
176 bond_info->tx_hashtbl = new_hashtbl; 176 bond_info->tx_hashtbl = new_hashtbl;
177 177
178 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 178 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
179 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); 179 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
180 } 180 }
181 181
182 _unlock_tx_hashtbl(bond); 182 _unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
701 */ 701 */
702 rlb_choose_channel(skb, bond); 702 rlb_choose_channel(skb, bond);
703 703
704 /* The ARP relpy packets must be delayed so that 704 /* The ARP reply packets must be delayed so that
705 * they can cancel out the influence of the ARP request. 705 * they can cancel out the influence of the ARP request.
706 */ 706 */
707 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; 707 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1042 * 1042 *
1043 * If the permanent hw address of @slave is @bond's hw address, we need to 1043 * If the permanent hw address of @slave is @bond's hw address, we need to
1044 * find a different hw address to give @slave, that isn't in use by any other 1044 * find a different hw address to give @slave, that isn't in use by any other
1045 * slave in the bond. This address must be, of course, one of the premanent 1045 * slave in the bond. This address must be, of course, one of the permanent
1046 * addresses of the other slaves. 1046 * addresses of the other slaves.
1047 * 1047 *
1048 * We go over the slave list, and for each slave there we compare its 1048 * We go over the slave list, and for each slave there we compare its
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 86861f08b24d..8ca7158b2dda 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -75,7 +75,7 @@ struct tlb_client_info {
75 * gave this entry index. 75 * gave this entry index.
76 */ 76 */
77 u32 tx_bytes; /* Each Client accumulates the BytesTx that 77 u32 tx_bytes; /* Each Client accumulates the BytesTx that
78 * were tranmitted to it, and after each 78 * were transmitted to it, and after each
79 * CallBack the LoadHistory is divided 79 * CallBack the LoadHistory is divided
80 * by the balance interval 80 * by the balance interval
81 */ 81 */
@@ -122,7 +122,6 @@ struct tlb_slave_info {
122}; 122};
123 123
124struct alb_bond_info { 124struct alb_bond_info {
125 struct timer_list alb_timer;
126 struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ 125 struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
127 spinlock_t tx_hashtbl_lock; 126 spinlock_t tx_hashtbl_lock;
128 u32 unbalanced_load; 127 u32 unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
140 struct slave *next_rx_slave;/* next slave to be assigned 139 struct slave *next_rx_slave;/* next slave to be assigned
141 * to a new rx client for 140 * to a new rx client for
142 */ 141 */
143 u32 rlb_interval_counter;
144 u8 primary_is_promisc; /* boolean */ 142 u8 primary_is_promisc; /* boolean */
145 u32 rlb_promisc_timeout_counter;/* counts primary 143 u32 rlb_promisc_timeout_counter;/* counts primary
146 * promiscuity time 144 * promiscuity time
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c0a1bc5b1435..bd1d811c204f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
260 260
261 if (!ofdev->dev.of_match) 261 if (!ofdev->dev.of_match)
262 return -EINVAL; 262 return -EINVAL;
263 data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data; 263 data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
264 264
265 base = of_iomap(np, 0); 265 base = of_iomap(np, 0);
266 if (!base) { 266 if (!base) {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ea0dc451da9c..d70fb76edb77 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
173 | NETIF_F_RXCSUM 173 | NETIF_F_RXCSUM
174 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
175 | NETIF_F_LLTX 175 | NETIF_F_LLTX
176 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL
177 | NETIF_F_VLAN_CHALLENGED;
177 dev->ethtool_ops = &loopback_ethtool_ops; 178 dev->ethtool_ops = &loopback_ethtool_ops;
178 dev->header_ops = &eth_header_ops; 179 dev->header_ops = &eth_header_ops;
179 dev->netdev_ops = &loopback_ops; 180 dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index aa2813e06d00..1074231f0a0d 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
860 prev_eedata = eedata; 860 prev_eedata = eedata;
861 } 861 }
862 862
863 /* Store MAC Address in perm_addr */
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
865
863 dev->base_addr = (unsigned long __force) ioaddr; 866 dev->base_addr = (unsigned long __force) ioaddr;
864 dev->irq = irq; 867 dev->irq = irq;
865 868
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d7299f1a4940..679dc8519c5b 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -174,7 +174,7 @@
174 174
175#define MAX_NUM_CARDS 4 175#define MAX_NUM_CARDS 4
176 176
177#define MAX_BUFFERS_PER_CMD 32 177#define NETXEN_MAX_FRAGS_PER_TX 14
178#define MAX_TSO_HEADER_DESC 2 178#define MAX_TSO_HEADER_DESC 2
179#define MGMT_CMD_DESC_RESV 4 179#define MGMT_CMD_DESC_RESV 4
180#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 180#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
558 */ 558 */
559struct netxen_cmd_buffer { 559struct netxen_cmd_buffer {
560 struct sk_buff *skb; 560 struct sk_buff *skb;
561 struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 561 struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
562 u32 frag_count; 562 u32 frag_count;
563}; 563};
564 564
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 83348dc4b184..e8a4b6655999 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1844 struct cmd_desc_type0 *hwdesc, *first_desc; 1844 struct cmd_desc_type0 *hwdesc, *first_desc;
1845 struct pci_dev *pdev; 1845 struct pci_dev *pdev;
1846 int i, k; 1846 int i, k;
1847 int delta = 0;
1848 struct skb_frag_struct *frag;
1847 1849
1848 u32 producer; 1850 u32 producer;
1849 int frag_count, no_of_desc; 1851 int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1851 1853
1852 frag_count = skb_shinfo(skb)->nr_frags + 1; 1854 frag_count = skb_shinfo(skb)->nr_frags + 1;
1853 1855
1856 /* 14 frags supported for normal packet and
1857 * 32 frags supported for TSO packet
1858 */
1859 if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
1860
1861 for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
1862 frag = &skb_shinfo(skb)->frags[i];
1863 delta += frag->size;
1864 }
1865
1866 if (!__pskb_pull_tail(skb, delta))
1867 goto drop_packet;
1868
1869 frag_count = 1 + skb_shinfo(skb)->nr_frags;
1870 }
1854 /* 4 fragments per cmd des */ 1871 /* 4 fragments per cmd des */
1855 no_of_desc = (frag_count + 3) >> 2; 1872 no_of_desc = (frag_count + 3) >> 2;
1856 1873
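
This hunk (and the identical qlcnic one below) works around a hardware limit: the tx engine accepts at most NETXEN_MAX_FRAGS_PER_TX (14) fragments for a non-TSO frame, while TSO frames may use up to 32. Oversized fragment lists are linearised just enough with __pskb_pull_tail(), which copies the leading page fragments into the skb's linear area and shrinks nr_frags. A self-contained sketch of the same logic (fit_skb_to_hw_limit() is illustrative, not a driver function):

static int fit_skb_to_hw_limit(struct sk_buff *skb, int max_frags)
{
	int frag_count = skb_shinfo(skb)->nr_frags + 1;
	int delta = 0, i;

	if (skb_is_gso(skb) || frag_count <= max_frags)
		return 0;			/* already within the limit */

	/* sum the bytes held by the excess leading fragments */
	for (i = 0; i < frag_count - max_frags; i++)
		delta += skb_shinfo(skb)->frags[i].size;

	/* pull them into the linear area; NULL means allocation failed */
	if (!__pskb_pull_tail(skb, delta))
		return -ENOMEM;			/* caller drops the packet */

	return 0;
}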
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f9..b0dead00b2d1 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -99,6 +99,7 @@
99#define TX_UDPV6_PKT 0x0c 99#define TX_UDPV6_PKT 0x0c
100 100
101/* Tx defines */ 101/* Tx defines */
102#define QLCNIC_MAX_FRAGS_PER_TX 14
102#define MAX_TSO_HEADER_DESC 2 103#define MAX_TSO_HEADER_DESC 2
103#define MGMT_CMD_DESC_RESV 4 104#define MGMT_CMD_DESC_RESV 4
104#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 105#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa9..cb1a1ef36c0a 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2099 struct cmd_desc_type0 *hwdesc, *first_desc; 2099 struct cmd_desc_type0 *hwdesc, *first_desc;
2100 struct pci_dev *pdev; 2100 struct pci_dev *pdev;
2101 struct ethhdr *phdr; 2101 struct ethhdr *phdr;
2102 int delta = 0;
2102 int i, k; 2103 int i, k;
2103 2104
2104 u32 producer; 2105 u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2118 } 2119 }
2119 2120
2120 frag_count = skb_shinfo(skb)->nr_frags + 1; 2121 frag_count = skb_shinfo(skb)->nr_frags + 1;
2122 /* 14 frags supported for normal packet and
2123 * 32 frags supported for TSO packet
2124 */
2125 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2126
2127 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2128 delta += skb_shinfo(skb)->frags[i].size;
2129
2130 if (!__pskb_pull_tail(skb, delta))
2131 goto drop_packet;
2132
2133 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2134 }
2121 2135
2122 /* 4 fragments per cmd des */ 2136 /* 4 fragments per cmd des */
2123 no_of_desc = (frag_count + 3) >> 2; 2137 no_of_desc = (frag_count + 3) >> 2;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..a3c2aab53de8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
328 * processing to finish, then directly poll (and ack ) the eventq. 328 * processing to finish, then directly poll (and ack ) the eventq.
329 * Finally reenable NAPI and interrupts. 329 * Finally reenable NAPI and interrupts.
330 * 330 *
331 * Since we are touching interrupts the caller should hold the suspend lock 331 * This is for use only during a loopback self-test. It must not
332 * deliver any packets up the stack as this can result in deadlock.
332 */ 333 */
333void efx_process_channel_now(struct efx_channel *channel) 334void efx_process_channel_now(struct efx_channel *channel)
334{ 335{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
336 337
337 BUG_ON(channel->channel >= efx->n_channels); 338 BUG_ON(channel->channel >= efx->n_channels);
338 BUG_ON(!channel->enabled); 339 BUG_ON(!channel->enabled);
340 BUG_ON(!efx->loopback_selftest);
339 341
340 /* Disable interrupts and wait for ISRs to complete */ 342 /* Disable interrupts and wait for ISRs to complete */
341 efx_nic_disable_interrupts(efx); 343 efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
1436 * restart the transmit interface early so the watchdog timer stops */ 1438 * restart the transmit interface early so the watchdog timer stops */
1437 efx_start_port(efx); 1439 efx_start_port(efx);
1438 1440
1439 if (efx_dev_registered(efx)) 1441 if (efx_dev_registered(efx) && !efx->port_inhibited)
1440 netif_tx_wake_all_queues(efx->net_dev); 1442 netif_tx_wake_all_queues(efx->net_dev);
1441 1443
1442 efx_for_each_channel(channel, efx) 1444 efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 152
153 spin_lock_irqsave(&efx->biu_lock, flags); 153 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 154 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
155 value->u32[1] = _efx_readd(efx, reg + 4); 156 value->u32[1] = _efx_readd(efx, reg + 4);
156 value->u32[2] = _efx_readd(efx, reg + 8); 157 value->u32[2] = _efx_readd(efx, reg + 8);
157 value->u32[3] = _efx_readd(efx, reg + 12); 158 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
174 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 175 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
175#else 176#else
176 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 177 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
178 rmb();
177 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 179 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
178#endif 180#endif
179 spin_unlock_irqrestore(&efx->biu_lock, flags); 181 spin_unlock_irqrestore(&efx->biu_lock, flags);
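
Both hunks above insert rmb() between the component reads of a multi-dword register. _efx_readd() and __raw_readl() are raw accessors with no ordering guarantee on all architectures, and (assuming the BIU latches the remaining words when the first dword is read, which is why the whole read happens under biu_lock) the first read must complete before the rest. A sketch of the ordered read, with plain readl() swapped in for the driver's raw accessor:

static inline void read_oword_ordered(void __iomem *base, unsigned int reg,
				      u32 out[4])
{
	out[0] = readl(base + reg + 0);
	rmb();		/* dword 0 must be read before the other three */
	out[1] = readl(base + reg + 4);
	out[2] = readl(base + reg + 8);
	out[3] = readl(base + reg + 12);
}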
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a0..191a311da2dc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
330 * @eventq_mask: Event queue pointer mask 330 * @eventq_mask: Event queue pointer mask
331 * @eventq_read_ptr: Event queue read pointer 331 * @eventq_read_ptr: Event queue read pointer
332 * @last_eventq_read_ptr: Last event queue read pointer value. 332 * @last_eventq_read_ptr: Last event queue read pointer value.
333 * @magic_count: Event queue test event count
334 * @irq_count: Number of IRQs since last adaptive moderation decision 333 * @irq_count: Number of IRQs since last adaptive moderation decision
335 * @irq_mod_score: IRQ moderation score 334 * @irq_mod_score: IRQ moderation score
336 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 335 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
360 unsigned int eventq_mask; 359 unsigned int eventq_mask;
361 unsigned int eventq_read_ptr; 360 unsigned int eventq_read_ptr;
362 unsigned int last_eventq_read_ptr; 361 unsigned int last_eventq_read_ptr;
363 unsigned int magic_count;
364 362
365 unsigned int irq_count; 363 unsigned int irq_count;
366 unsigned int irq_mod_score; 364 unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..10f1cb79c147 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
84static inline efx_qword_t *efx_event(struct efx_channel *channel, 84static inline efx_qword_t *efx_event(struct efx_channel *channel,
85 unsigned int index) 85 unsigned int index)
86{ 86{
87 return ((efx_qword_t *) (channel->eventq.addr)) + index; 87 return ((efx_qword_t *) (channel->eventq.addr)) +
88 (index & channel->eventq_mask);
88} 89}
89 90
90/* See if an event is present 91/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
673 efx_dword_t reg; 674 efx_dword_t reg;
674 struct efx_nic *efx = channel->efx; 675 struct efx_nic *efx = channel->efx;
675 676
676 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); 677 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
678 channel->eventq_read_ptr & channel->eventq_mask);
677 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
678 channel->channel); 680 channel->channel);
679} 681}
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
908 910
909 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 911 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
910 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 912 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
911 ++channel->magic_count; 913 ; /* ignore */
912 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 914 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
913 /* The queue must be empty, so we won't receive any rx 915 /* The queue must be empty, so we won't receive any rx
914 * events, so efx_process_channel() won't refill the 916 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1015 /* Clear this event by marking it all ones */ 1017 /* Clear this event by marking it all ones */
1016 EFX_SET_QWORD(*p_event); 1018 EFX_SET_QWORD(*p_event);
1017 1019
1018 /* Increment read pointer */ 1020 ++read_ptr;
1019 read_ptr = (read_ptr + 1) & channel->eventq_mask;
1020 1021
1021 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1022 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1022 1023
@@ -1060,6 +1061,13 @@ out:
1060 return spent; 1061 return spent;
1061} 1062}
1062 1063
1064/* Check whether an event is present in the eventq at the current
1065 * read pointer. Only useful for self-test.
1066 */
1067bool efx_nic_event_present(struct efx_channel *channel)
1068{
1069 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1070}
1063 1071
1064/* Allocate buffer table entries for event queue */ 1072/* Allocate buffer table entries for event queue */
1065int efx_nic_probe_eventq(struct efx_channel *channel) 1073int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1165 struct efx_tx_queue *tx_queue; 1173 struct efx_tx_queue *tx_queue;
1166 struct efx_rx_queue *rx_queue; 1174 struct efx_rx_queue *rx_queue;
1167 unsigned int read_ptr = channel->eventq_read_ptr; 1175 unsigned int read_ptr = channel->eventq_read_ptr;
1168 unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; 1176 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1169 1177
1170 do { 1178 do {
1171 efx_qword_t *event = efx_event(channel, read_ptr); 1179 efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1205 * it's ok to throw away every non-flush event */ 1213 * it's ok to throw away every non-flush event */
1206 EFX_SET_QWORD(*event); 1214 EFX_SET_QWORD(*event);
1207 1215
1208 read_ptr = (read_ptr + 1) & channel->eventq_mask; 1216 ++read_ptr;
1209 } while (read_ptr != end_ptr); 1217 } while (read_ptr != end_ptr);
1210 1218
1211 channel->eventq_read_ptr = read_ptr; 1219 channel->eventq_read_ptr = read_ptr;
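
The nic.c changes make eventq_read_ptr free-running: the pointer is masked only where it indexes the ring (efx_event()) and where it is written back to hardware, while the increment itself no longer wraps. With a power-of-two ring this is safe, wrap happens naturally modulo 2^32, and it lets the self-test below detect progress simply by comparing pointer values. A minimal sketch of the idiom, independent of the driver types:

struct ring {
	u64 *entries;
	unsigned int mask;	/* ring size - 1, size a power of two */
	unsigned int read_ptr;	/* free-running; masked only on access */
};

static u64 *ring_entry(struct ring *r, unsigned int index)
{
	return r->entries + (index & r->mask);	/* mask at the access site */
}

static void ring_consume(struct ring *r)
{
	++r->read_ptr;		/* unmasked; wraps modulo 2^32 */
}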
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d41..a42db6e35be3 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
184extern void efx_nic_remove_eventq(struct efx_channel *channel); 184extern void efx_nic_remove_eventq(struct efx_channel *channel);
185extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 185extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
186extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 186extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
187extern bool efx_nic_event_present(struct efx_channel *channel);
187 188
188/* MAC/PHY */ 189/* MAC/PHY */
189extern void falcon_drain_tx_fifo(struct efx_nic *efx); 190extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d62..50ad3bcaf68a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
131static int efx_test_interrupts(struct efx_nic *efx, 131static int efx_test_interrupts(struct efx_nic *efx,
132 struct efx_self_tests *tests) 132 struct efx_self_tests *tests)
133{ 133{
134 struct efx_channel *channel;
135
136 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
137 tests->interrupt = -1; 135 tests->interrupt = -1;
138 136
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
140 efx->last_irq_cpu = -1; 138 efx->last_irq_cpu = -1;
141 smp_wmb(); 139 smp_wmb();
142 140
143 /* ACK each interrupting event queue. Receiving an interrupt due to
144 * traffic before a test event is raised is considered a pass */
145 efx_for_each_channel(channel, efx) {
146 if (channel->work_pending)
147 efx_process_channel_now(channel);
148 if (efx->last_irq_cpu >= 0)
149 goto success;
150 }
151
152 efx_nic_generate_interrupt(efx); 141 efx_nic_generate_interrupt(efx);
153 142
154 /* Wait for arrival of test interrupt. */ 143 /* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
173 struct efx_self_tests *tests) 162 struct efx_self_tests *tests)
174{ 163{
175 struct efx_nic *efx = channel->efx; 164 struct efx_nic *efx = channel->efx;
176 unsigned int magic_count, count; 165 unsigned int read_ptr, count;
177 166
178 tests->eventq_dma[channel->channel] = -1; 167 tests->eventq_dma[channel->channel] = -1;
179 tests->eventq_int[channel->channel] = -1; 168 tests->eventq_int[channel->channel] = -1;
180 tests->eventq_poll[channel->channel] = -1; 169 tests->eventq_poll[channel->channel] = -1;
181 170
182 magic_count = channel->magic_count; 171 read_ptr = channel->eventq_read_ptr;
183 channel->efx->last_irq_cpu = -1; 172 channel->efx->last_irq_cpu = -1;
184 smp_wmb(); 173 smp_wmb();
185 174
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
190 do { 179 do {
191 schedule_timeout_uninterruptible(HZ / 100); 180 schedule_timeout_uninterruptible(HZ / 100);
192 181
193 if (channel->work_pending) 182 if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
194 efx_process_channel_now(channel);
195
196 if (channel->magic_count != magic_count)
197 goto eventq_ok; 183 goto eventq_ok;
198 } while (++count < 2); 184 } while (++count < 2);
199 185
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
211 } 197 }
212 198
213 /* Check to see if event was received even if interrupt wasn't */ 199 /* Check to see if event was received even if interrupt wasn't */
214 efx_process_channel_now(channel); 200 if (efx_nic_event_present(channel)) {
215 if (channel->magic_count != magic_count) {
216 netif_err(efx, drv, efx->net_dev, 201 netif_err(efx, drv, efx->net_dev,
217 "channel %d event was generated, but " 202 "channel %d event was generated, but "
218 "failed to trigger an interrupt\n", channel->channel); 203 "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
770 __efx_reconfigure_port(efx); 755 __efx_reconfigure_port(efx);
771 mutex_unlock(&efx->mac_lock); 756 mutex_unlock(&efx->mac_lock);
772 757
758 netif_tx_wake_all_queues(efx->net_dev);
759
773 return rc_test; 760 return rc_test;
774} 761}
775 762
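
With magic_count gone, the self-test infers that the test event was delivered by watching the channel's read pointer move, which works precisely because the pointer is now free-running. ACCESS_ONCE() forces a fresh load on every pass so the compiler cannot cache the first read. A sketch of the wait loop as restructured above:

static bool wait_for_eventq_progress(struct efx_channel *channel)
{
	unsigned int start = channel->eventq_read_ptr;
	int count = 0;

	do {
		schedule_timeout_uninterruptible(HZ / 100);
		/* reload from memory each iteration */
		if (ACCESS_ONCE(channel->eventq_read_ptr) != start)
			return true;
	} while (++count < 2);

	return false;
}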
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 139801908217..d2c85dfdf3bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
435 * queue state. */ 435 * queue state. */
436 smp_mb(); 436 smp_mb();
437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
438 likely(efx->port_enabled)) { 438 likely(efx->port_enabled) &&
439 likely(!efx->port_inhibited)) {
439 fill_level = tx_queue->insert_count - tx_queue->read_count; 440 fill_level = tx_queue->insert_count - tx_queue->read_count;
440 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 441 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
441 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 442 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cb317cd069ff..484f795a779d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
240 * @net_dev: the net device to get address for 240 * @net_dev: the net device to get address for
241 * 241 *
242 * Older SiS900 and friends, use EEPROM to store MAC address. 242 * Older SiS900 and friends, use EEPROM to store MAC address.
243 * MAC address is read from read_eeprom() into @net_dev->dev_addr. 243 * MAC address is read from read_eeprom() into @net_dev->dev_addr and
244 * @net_dev->perm_addr.
244 */ 245 */
245 246
246static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 247static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
261 for (i = 0; i < 3; i++) 262 for (i = 0; i < 3; i++)
262 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 263 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
263 264
265 /* Store MAC Address in perm_addr */
266 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
267
264 return 1; 268 return 1;
265} 269}
266 270
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
271 * 275 *
272 * SiS630E model, use APC CMOS RAM to store MAC address. 276 * SiS630E model, use APC CMOS RAM to store MAC address.
273 * APC CMOS RAM is accessed through ISA bridge. 277 * APC CMOS RAM is accessed through ISA bridge.
274 * MAC address is read into @net_dev->dev_addr. 278 * MAC address is read into @net_dev->dev_addr and
279 * @net_dev->perm_addr.
275 */ 280 */
276 281
277static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, 282static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
296 outb(0x09 + i, 0x70); 301 outb(0x09 + i, 0x70);
297 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); 302 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
298 } 303 }
304
305 /* Store MAC Address in perm_addr */
306 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
307
299 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); 308 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
300 pci_dev_put(isa_bridge); 309 pci_dev_put(isa_bridge);
301 310
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
310 * 319 *
311 * SiS635 model, set MAC Reload Bit to load Mac address from APC 320 * SiS635 model, set MAC Reload Bit to load Mac address from APC
312 * to rfdr. rfdr is accessed through rfcr. MAC address is read into 321 * to rfdr. rfdr is accessed through rfcr. MAC address is read into
313 * @net_dev->dev_addr. 322 * @net_dev->dev_addr and @net_dev->perm_addr.
314 */ 323 */
315 324
316static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 325static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
334 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 343 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
335 } 344 }
336 345
346 /* Store MAC Address in perm_addr */
347 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
348
337 /* enable packet filtering */ 349 /* enable packet filtering */
338 outl(rfcrSave | RFEN, rfcr + ioaddr); 350 outl(rfcrSave | RFEN, rfcr + ioaddr);
339 351
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
353 * EEDONE signal to refuse EEPROM access by LAN. 365 * EEDONE signal to refuse EEPROM access by LAN.
354 * The EEPROM map of SiS962 or SiS963 is different to SiS900. 366 * The EEPROM map of SiS962 or SiS963 is different to SiS900.
355 * The signature field in SiS962 or SiS963 spec is meaningless. 367 * The signature field in SiS962 or SiS963 spec is meaningless.
356 * MAC address is read into @net_dev->dev_addr. 368 * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
357 */ 369 */
358 370
359static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 371static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
372 for (i = 0; i < 3; i++) 384 for (i = 0; i < 3; i++)
373 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 385 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
374 386
387 /* Store MAC Address in perm_addr */
388 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
389
375 outl(EEDONE, ee_addr); 390 outl(EEDONE, ee_addr);
376 return 1; 391 return 1;
377 } else { 392 } else {
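
The natsemi hunk earlier and all four sis900 variants apply the same one-line pattern: once dev_addr has been filled in from EEPROM, CMOS or the receive filter registers, the factory address is recorded in perm_addr so the ETHTOOL_GPERMADDR ioctl (ethtool -P) can report it even after the user overrides dev_addr. The whole pattern is:

	/* right after dev_addr is read from non-volatile storage */
	memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);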
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d65fab1ba790..e25093510b0c 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -26,9 +26,9 @@
26 26
27#undef DWMAC_DMA_DEBUG 27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG 28#ifdef DWMAC_DMA_DEBUG
29#define DBG(fmt, args...) printk(fmt, ## args) 29#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
30#else 30#else
31#define DBG(fmt, args...) do { } while (0) 31#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
32#endif 32#endif
33 33
34/* CSR1 enables the transmit DMA to check for new descriptor */ 34/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
152 /* read the status register (CSR5) */ 152 /* read the status register (CSR5) */
153 u32 intr_status = readl(ioaddr + DMA_STATUS); 153 u32 intr_status = readl(ioaddr + DMA_STATUS);
154 154
155 DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); 155 DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
156#ifdef DWMAC_DMA_DEBUG 156#ifdef DWMAC_DMA_DEBUG
157 /* It displays the DMA process states (CSR5 register) */ 157 /* It displays the DMA process states (CSR5 register) */
158 show_tx_process_state(intr_status); 158 show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
160#endif 160#endif
161 /* ABNORMAL interrupts */ 161 /* ABNORMAL interrupts */
162 if (unlikely(intr_status & DMA_STATUS_AIS)) { 162 if (unlikely(intr_status & DMA_STATUS_AIS)) {
163 DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 163 DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
164 if (unlikely(intr_status & DMA_STATUS_UNF)) { 164 if (unlikely(intr_status & DMA_STATUS_UNF)) {
165 DBG(INFO, "transmit underflow\n"); 165 DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
166 ret = tx_hard_error_bump_tc; 166 ret = tx_hard_error_bump_tc;
167 x->tx_undeflow_irq++; 167 x->tx_undeflow_irq++;
168 } 168 }
169 if (unlikely(intr_status & DMA_STATUS_TJT)) { 169 if (unlikely(intr_status & DMA_STATUS_TJT)) {
170 DBG(INFO, "transmit jabber\n"); 170 DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
171 x->tx_jabber_irq++; 171 x->tx_jabber_irq++;
172 } 172 }
173 if (unlikely(intr_status & DMA_STATUS_OVF)) { 173 if (unlikely(intr_status & DMA_STATUS_OVF)) {
174 DBG(INFO, "recv overflow\n"); 174 DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
175 x->rx_overflow_irq++; 175 x->rx_overflow_irq++;
176 } 176 }
177 if (unlikely(intr_status & DMA_STATUS_RU)) { 177 if (unlikely(intr_status & DMA_STATUS_RU)) {
178 DBG(INFO, "receive buffer unavailable\n"); 178 DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
179 x->rx_buf_unav_irq++; 179 x->rx_buf_unav_irq++;
180 } 180 }
181 if (unlikely(intr_status & DMA_STATUS_RPS)) { 181 if (unlikely(intr_status & DMA_STATUS_RPS)) {
182 DBG(INFO, "receive process stopped\n"); 182 DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
183 x->rx_process_stopped_irq++; 183 x->rx_process_stopped_irq++;
184 } 184 }
185 if (unlikely(intr_status & DMA_STATUS_RWT)) { 185 if (unlikely(intr_status & DMA_STATUS_RWT)) {
186 DBG(INFO, "receive watchdog\n"); 186 DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
187 x->rx_watchdog_irq++; 187 x->rx_watchdog_irq++;
188 } 188 }
189 if (unlikely(intr_status & DMA_STATUS_ETI)) { 189 if (unlikely(intr_status & DMA_STATUS_ETI)) {
190 DBG(INFO, "transmit early interrupt\n"); 190 DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
191 x->tx_early_irq++; 191 x->tx_early_irq++;
192 } 192 }
193 if (unlikely(intr_status & DMA_STATUS_TPS)) { 193 if (unlikely(intr_status & DMA_STATUS_TPS)) {
194 DBG(INFO, "transmit process stopped\n"); 194 DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
195 x->tx_process_stopped_irq++; 195 x->tx_process_stopped_irq++;
196 ret = tx_hard_error; 196 ret = tx_hard_error;
197 } 197 }
198 if (unlikely(intr_status & DMA_STATUS_FBI)) { 198 if (unlikely(intr_status & DMA_STATUS_FBI)) {
199 DBG(INFO, "fatal bus error\n"); 199 DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
200 x->fatal_bus_error_irq++; 200 x->fatal_bus_error_irq++;
201 ret = tx_hard_error; 201 ret = tx_hard_error;
202 } 202 }
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
215 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ 215 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
216 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); 216 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
217 217
218 DBG(INFO, "\n\n"); 218 DWMAC_LIB_DBG(KERN_INFO "\n\n");
219 return ret; 219 return ret;
220} 220}
221 221
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b50..cc973fc38405 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
750 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 750 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
751 priv->xstats.threshold = tc; 751 priv->xstats.threshold = tc;
752 } 752 }
753 stmmac_tx_err(priv);
754 } else if (unlikely(status == tx_hard_error)) 753 } else if (unlikely(status == tx_hard_error))
755 stmmac_tx_err(priv); 754 stmmac_tx_err(priv);
756} 755}
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
781 780
782 stmmac_verify_args(); 781 stmmac_verify_args();
783 782
784 ret = stmmac_init_phy(dev);
785 if (unlikely(ret)) {
786 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
787 return ret;
788 }
789
790 /* Request the IRQ lines */
791 ret = request_irq(dev->irq, stmmac_interrupt,
792 IRQF_SHARED, dev->name, dev);
793 if (unlikely(ret < 0)) {
794 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
795 __func__, dev->irq, ret);
796 return ret;
797 }
798
799#ifdef CONFIG_STMMAC_TIMER 783#ifdef CONFIG_STMMAC_TIMER
800 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 784 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
801 if (unlikely(priv->tm == NULL)) { 785 if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
814 } else 798 } else
815 priv->tm->enable = 1; 799 priv->tm->enable = 1;
816#endif 800#endif
801 ret = stmmac_init_phy(dev);
802 if (unlikely(ret)) {
803 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
804 goto open_error;
805 }
817 806
818 /* Create and initialize the TX/RX descriptors chains. */ 807 /* Create and initialize the TX/RX descriptors chains. */
819 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 808 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
822 init_dma_desc_rings(dev); 811 init_dma_desc_rings(dev);
823 812
824 /* DMA initialization and SW reset */ 813 /* DMA initialization and SW reset */
825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 814 ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
826 priv->dma_tx_phy, 815 priv->dma_tx_phy, priv->dma_rx_phy);
827 priv->dma_rx_phy) < 0)) { 816 if (ret < 0) {
828
829 pr_err("%s: DMA initialization failed\n", __func__); 817 pr_err("%s: DMA initialization failed\n", __func__);
830 return -1; 818 goto open_error;
831 } 819 }
832 820
833 /* Copy the MAC addr into the HW */ 821 /* Copy the MAC addr into the HW */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 836 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 837 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
850 838
839 /* Request the IRQ lines */
840 ret = request_irq(dev->irq, stmmac_interrupt,
841 IRQF_SHARED, dev->name, dev);
842 if (unlikely(ret < 0)) {
843 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
844 __func__, dev->irq, ret);
845 goto open_error;
846 }
847
851 /* Enable the MAC Rx/Tx */ 848 /* Enable the MAC Rx/Tx */
852 stmmac_enable_mac(priv->ioaddr); 849 stmmac_enable_mac(priv->ioaddr);
853 850
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
878 napi_enable(&priv->napi); 875 napi_enable(&priv->napi);
879 skb_queue_head_init(&priv->rx_recycle); 876 skb_queue_head_init(&priv->rx_recycle);
880 netif_start_queue(dev); 877 netif_start_queue(dev);
878
881 return 0; 879 return 0;
880
881open_error:
882#ifdef CONFIG_STMMAC_TIMER
883 kfree(priv->tm);
884#endif
885 if (priv->phydev)
886 phy_disconnect(priv->phydev);
887
888 return ret;
882} 889}
883 890
884/** 891/**
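
The stmmac_open() reorder delays PHY attach and request_irq() until the steps they depend on have succeeded, and routes every failure through a single open_error label so partially acquired resources are released exactly once. A hedged sketch of the shape (helper names are placeholders for the stmmac calls; PHY detach is assumed safe when nothing attached, as the priv->phydev check above ensures):

static irqreturn_t sketch_interrupt(int irq, void *dev_id);

static int sketch_open(struct net_device *dev)
{
	int ret;

	ret = attach_phy(dev);		/* nothing to unwind yet on failure */
	if (ret)
		goto open_error;

	ret = init_dma(dev);		/* failure must detach the PHY */
	if (ret < 0)
		goto open_error;

	ret = request_irq(dev->irq, sketch_interrupt, IRQF_SHARED,
			  dev->name, dev);	/* last fallible step */
	if (ret < 0)
		goto open_error;

	enable_mac(dev);
	return 0;

open_error:
	detach_phy(dev);		/* guarded internally, like phydev */
	return ret;
}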
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 8a3b191b195b..ff32befd8443 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
1251/* 1251/*
1252 * The NIC has told us that a packet has been downloaded onto the card, we must 1252 * The NIC has told us that a packet has been downloaded onto the card, we must
1253 * find out which packet it has done, clear the skb and information for the packet 1253 * find out which packet it has done, clear the skb and information for the packet
1254 * then advance around the ring for all tranmitted packets 1254 * then advance around the ring for all transmitted packets
1255 */ 1255 */
1256 1256
1257static void xl_dn_comp(struct net_device *dev) 1257static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
1568 if (lan_status_diff & LSC_SOFT_ERR) 1568 if (lan_status_diff & LSC_SOFT_ERR)
1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1570 if (lan_status_diff & LSC_TRAN_BCN) 1570 if (lan_status_diff & LSC_TRAN_BCN)
1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1571 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1572 if (lan_status_diff & LSC_SS) 1572 if (lan_status_diff & LSC_SS)
1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1574 if (lan_status_diff & LSC_RING_REC) 1574 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 5bd140704533..9354ca9da576 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1675,7 +1675,7 @@ drop_frame:
1675 if (lan_status_diff & LSC_SOFT_ERR) 1675 if (lan_status_diff & LSC_SOFT_ERR)
1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); 1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
1677 if (lan_status_diff & LSC_TRAN_BCN) 1677 if (lan_status_diff & LSC_TRAN_BCN)
1678 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); 1678 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
1679 if (lan_status_diff & LSC_SS) 1679 if (lan_status_diff & LSC_SS)
1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1681 if (lan_status_diff & LSC_RING_REC) 1681 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3d2fbe60b46e..2684003b8ab6 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1500,7 +1500,7 @@ drop_frame:
1500 if (lan_status_diff & LSC_SOFT_ERR) 1500 if (lan_status_diff & LSC_SOFT_ERR)
1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1502 if (lan_status_diff & LSC_TRAN_BCN) 1502 if (lan_status_diff & LSC_TRAN_BCN)
1503 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1503 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1504 if (lan_status_diff & LSC_SS) 1504 if (lan_status_diff & LSC_SS)
1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1506 if (lan_status_diff & LSC_RING_REC) 1506 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f1b8af64569c..2d10239ce829 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
1040 } 1040 }
1041 1041
1042 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 1042 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
1043 &hif_dev->udev->dev, hif_dev->device_id, 1043 &interface->dev, hif_dev->device_id,
1044 hif_dev->udev->product, id->driver_info); 1044 hif_dev->udev->product, id->driver_info);
1045 if (ret) { 1045 if (ret) {
1046 ret = -EINVAL; 1046 ret = -EINVAL;
@@ -1158,7 +1158,7 @@ fail_resume:
1158#endif 1158#endif
1159 1159
1160static struct usb_driver ath9k_hif_usb_driver = { 1160static struct usb_driver ath9k_hif_usb_driver = {
1161 .name = "ath9k_hif_usb", 1161 .name = KBUILD_MODNAME,
1162 .probe = ath9k_hif_usb_probe, 1162 .probe = ath9k_hif_usb_probe,
1163 .disconnect = ath9k_hif_usb_disconnect, 1163 .disconnect = ath9k_hif_usb_disconnect,
1164#ifdef CONFIG_PM 1164#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1ec9bcd6b281..c95bc5cc1a1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1254 ah->txchainmask = common->tx_chainmask; 1254 ah->txchainmask = common->tx_chainmask;
1255 ah->rxchainmask = common->rx_chainmask; 1255 ah->rxchainmask = common->rx_chainmask;
1256 1256
1257 if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
1258 ath9k_hw_abortpcurecv(ah);
1259 if (!ath9k_hw_stopdmarecv(ah)) {
1260 ath_dbg(common, ATH_DBG_XMIT,
1261 "Failed to stop receive dma\n");
1262 bChannelChange = false;
1263 }
1264 }
1265
1266 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1257 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1267 return -EIO; 1258 return -EIO;
1268 1259
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 562257ac52cf..edc1cbbfecaf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
751} 751}
752EXPORT_SYMBOL(ath9k_hw_abortpcurecv); 752EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
753 753
754bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 754bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
755{ 755{
756#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 756#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
757#define AH_RX_TIME_QUANTUM 100 /* usec */ 757#define AH_RX_TIME_QUANTUM 100 /* usec */
758 struct ath_common *common = ath9k_hw_common(ah); 758 struct ath_common *common = ath9k_hw_common(ah);
759 u32 mac_status, last_mac_status = 0;
759 int i; 760 int i;
760 761
762 /* Enable access to the DMA observation bus */
763 REG_WRITE(ah, AR_MACMISC,
764 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
765 (AR_MACMISC_MISC_OBS_BUS_1 <<
766 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
767
761 REG_WRITE(ah, AR_CR, AR_CR_RXD); 768 REG_WRITE(ah, AR_CR, AR_CR_RXD);
762 769
763 /* Wait for rx enable bit to go low */ 770 /* Wait for rx enable bit to go low */
764 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { 771 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
765 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) 772 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
766 break; 773 break;
774
775 if (!AR_SREV_9300_20_OR_LATER(ah)) {
776 mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
777 if (mac_status == 0x1c0 && mac_status == last_mac_status) {
778 *reset = true;
779 break;
780 }
781
782 last_mac_status = mac_status;
783 }
784
767 udelay(AH_TIME_QUANTUM); 785 udelay(AH_TIME_QUANTUM);
768 } 786 }
769 787
770 if (i == 0) { 788 if (i == 0) {
771 ath_err(common, 789 ath_err(common,
772 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 790 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
773 AH_RX_STOP_DMA_TIMEOUT / 1000, 791 AH_RX_STOP_DMA_TIMEOUT / 1000,
774 REG_READ(ah, AR_CR), 792 REG_READ(ah, AR_CR),
775 REG_READ(ah, AR_DIAG_SW)); 793 REG_READ(ah, AR_DIAG_SW),
794 REG_READ(ah, AR_DMADBG_7));
776 return false; 795 return false;
777 } else { 796 } else {
778 return true; 797 return true;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b2b2ff852c32..c2a59386fb9c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
695void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 695void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
696void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); 696void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
697void ath9k_hw_abortpcurecv(struct ath_hw *ah); 697void ath9k_hw_abortpcurecv(struct ath_hw *ah);
698bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 698bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
699int ath9k_hw_beaconq_setup(struct ath_hw *ah); 699int ath9k_hw_beaconq_setup(struct ath_hw *ah);
700 700
701/* Interrupt Handling */ 701/* Interrupt Handling */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dddb85de622d..17d04ff8d678 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1376 1376
1377 ath9k_calculate_iter_data(hw, vif, &iter_data); 1377 ath9k_calculate_iter_data(hw, vif, &iter_data);
1378 1378
1379 ath9k_ps_wakeup(sc);
1380 /* Set BSSID mask. */ 1379 /* Set BSSID mask. */
1381 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 1380 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1382 ath_hw_setbssidmask(common); 1381 ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1411 } 1410 }
1412 1411
1413 ath9k_hw_set_interrupts(ah, ah->imask); 1412 ath9k_hw_set_interrupts(ah, ah->imask);
1414 ath9k_ps_restore(sc);
1415 1413
1416 /* Set up ANI */ 1414 /* Set up ANI */
1417 if ((iter_data.naps + iter_data.nadhocs) > 0) { 1415 if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1457 struct ath_vif *avp = (void *)vif->drv_priv; 1455 struct ath_vif *avp = (void *)vif->drv_priv;
1458 int ret = 0; 1456 int ret = 0;
1459 1457
1458 ath9k_ps_wakeup(sc);
1460 mutex_lock(&sc->mutex); 1459 mutex_lock(&sc->mutex);
1461 1460
1462 switch (vif->type) { 1461 switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1503 ath9k_do_vif_add_setup(hw, vif); 1502 ath9k_do_vif_add_setup(hw, vif);
1504out: 1503out:
1505 mutex_unlock(&sc->mutex); 1504 mutex_unlock(&sc->mutex);
1505 ath9k_ps_restore(sc);
1506 return ret; 1506 return ret;
1507} 1507}
1508 1508
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1517 1517
1518 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); 1518 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
1519 mutex_lock(&sc->mutex); 1519 mutex_lock(&sc->mutex);
1520 ath9k_ps_wakeup(sc);
1520 1521
1521 /* See if new interface type is valid. */ 1522 /* See if new interface type is valid. */
1522 if ((new_type == NL80211_IFTYPE_ADHOC) && 1523 if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1546 1547
1547 ath9k_do_vif_add_setup(hw, vif); 1548 ath9k_do_vif_add_setup(hw, vif);
1548out: 1549out:
1550 ath9k_ps_restore(sc);
1549 mutex_unlock(&sc->mutex); 1551 mutex_unlock(&sc->mutex);
1550 return ret; 1552 return ret;
1551} 1553}
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1558 1560
1559 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1561 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1560 1562
1563 ath9k_ps_wakeup(sc);
1561 mutex_lock(&sc->mutex); 1564 mutex_lock(&sc->mutex);
1562 1565
1563 sc->nvifs--; 1566 sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1569 ath9k_calculate_summary_state(hw, NULL); 1572 ath9k_calculate_summary_state(hw, NULL);
1570 1573
1571 mutex_unlock(&sc->mutex); 1574 mutex_unlock(&sc->mutex);
1575 ath9k_ps_restore(sc);
1572} 1576}
1573 1577
1574static void ath9k_enable_ps(struct ath_softc *sc) 1578static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1809 1813
1810 txq = sc->tx.txq_map[queue]; 1814 txq = sc->tx.txq_map[queue];
1811 1815
1816 ath9k_ps_wakeup(sc);
1812 mutex_lock(&sc->mutex); 1817 mutex_lock(&sc->mutex);
1813 1818
1814 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1819 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1832 ath_beaconq_config(sc); 1837 ath_beaconq_config(sc);
1833 1838
1834 mutex_unlock(&sc->mutex); 1839 mutex_unlock(&sc->mutex);
1840 ath9k_ps_restore(sc);
1835 1841
1836 return ret; 1842 return ret;
1837} 1843}
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1894 int slottime; 1900 int slottime;
1895 int error; 1901 int error;
1896 1902
1903 ath9k_ps_wakeup(sc);
1897 mutex_lock(&sc->mutex); 1904 mutex_lock(&sc->mutex);
1898 1905
1899 if (changed & BSS_CHANGED_BSSID) { 1906 if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1994 } 2001 }
1995 2002
1996 mutex_unlock(&sc->mutex); 2003 mutex_unlock(&sc->mutex);
2004 ath9k_ps_restore(sc);
1997} 2005}
1998 2006
1999static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 2007static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
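
The ath9k/main.c hunks above all apply one pattern: each mac80211 callback that programs the hardware is bracketed by ath9k_ps_wakeup()/ath9k_ps_restore(), so the chip cannot be in network sleep while registers are written. A minimal sketch of the pairing (the callback body is a placeholder, not driver code):

static int ath9k_example_op(struct ath_softc *sc)
{
	int ret = 0;

	ath9k_ps_wakeup(sc);	/* take a power-save wakeup reference */
	mutex_lock(&sc->mutex);

	/* ... all hardware register access happens in this window ... */

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);	/* drop the reference; chip may doze again */
	return ret;
}

Note the hunks are not uniform about ordering relative to sc->mutex (change_interface wakes up after taking the mutex, remove_interface before); what matters is that wakeup/restore strictly bracket the register accesses.
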
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a9c3f4672aa0..dcd19bc337d1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -486,12 +486,12 @@ start_recv:
486bool ath_stoprecv(struct ath_softc *sc) 486bool ath_stoprecv(struct ath_softc *sc)
487{ 487{
488 struct ath_hw *ah = sc->sc_ah; 488 struct ath_hw *ah = sc->sc_ah;
489 bool stopped; 489 bool stopped, reset = false;
490 490
491 spin_lock_bh(&sc->rx.rxbuflock); 491 spin_lock_bh(&sc->rx.rxbuflock);
492 ath9k_hw_abortpcurecv(ah); 492 ath9k_hw_abortpcurecv(ah);
493 ath9k_hw_setrxfilter(ah, 0); 493 ath9k_hw_setrxfilter(ah, 0);
494 stopped = ath9k_hw_stopdmarecv(ah); 494 stopped = ath9k_hw_stopdmarecv(ah, &reset);
495 495
496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
497 ath_edma_stop_recv(sc); 497 ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
506 "confusing the DMA engine when we start RX up\n"); 506 "confusing the DMA engine when we start RX up\n");
507 ATH_DBG_WARN_ON_ONCE(!stopped); 507 ATH_DBG_WARN_ON_ONCE(!stopped);
508 } 508 }
509 return stopped; 509 return stopped || reset;
510} 510}
511 511
512void ath_flushrecv(struct ath_softc *sc) 512void ath_flushrecv(struct ath_softc *sc)
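
With this change ath_stoprecv() reports success when RX DMA either stopped cleanly or was forced down by a hardware reset: ath9k_hw_stopdmarecv() grows a reset out-parameter and the function returns stopped || reset. A caller-side sketch of the new contract (the restart body is a placeholder):

static void example_restart_rx(struct ath_softc *sc)
{
	/* true now means "RX DMA is certainly not running", whether it
	 * halted on request or the MAC had to be reset to stop it */
	if (ath_stoprecv(sc)) {
		/* ... safe to rebuild RX descriptors and re-enable ... */
	} else {
		/* DMA may still be live; restarting here could corrupt
		 * the descriptor ring */
	}
}
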
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 248c670fdfbe..5c2cfe694152 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
195 {APL9_WORLD, CTL_ETSI, CTL_ETSI}, 195 {APL9_WORLD, CTL_ETSI, CTL_ETSI},
196 196
197 {APL3_FCCA, CTL_FCC, CTL_FCC}, 197 {APL3_FCCA, CTL_FCC, CTL_FCC},
198 {APL7_FCCA, CTL_FCC, CTL_FCC},
198 {APL1_ETSIC, CTL_FCC, CTL_ETSI}, 199 {APL1_ETSIC, CTL_FCC, CTL_ETSI},
199 {APL2_ETSIC, CTL_FCC, CTL_ETSI}, 200 {APL2_ETSIC, CTL_FCC, CTL_ETSI},
200 {APL2_APLD, CTL_FCC, NO_CTL}, 201 {APL2_APLD, CTL_FCC, NO_CTL},
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index 2a45dd44cc12..aef65cd47661 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,6 +1,5 @@
1config IWLWIFI_LEGACY 1config IWLWIFI_LEGACY
2 tristate "Intel Wireless Wifi legacy devices" 2 tristate
3 depends on PCI && MAC80211
4 select FW_LOADER 3 select FW_LOADER
5 select NEW_LEDS 4 select NEW_LEDS
6 select LEDS_CLASS 5 select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
65 64
66config IWL4965 65config IWL4965
67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 66 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
68 depends on IWLWIFI_LEGACY 67 depends on PCI && MAC80211
68 select IWLWIFI_LEGACY
69 ---help--- 69 ---help---
70 This option enables support for 70 This option enables support for
71 71
@@ -92,7 +92,8 @@ config IWL4965
92 92
93config IWL3945 93config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on IWLWIFI_LEGACY 95 depends on PCI && MAC80211
96 select IWLWIFI_LEGACY
96 ---help--- 97 ---help---
97 Select to build the driver supporting the: 98 Select to build the driver supporting the:
98 99
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 779d3cb86e2c..5c3a68d3af12 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -74,8 +74,6 @@
74/* RSSI to dBm */ 74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95 75#define IWL39_RSSI_OFFSET 95
76 76
77#define IWL_DEFAULT_TX_POWER 0x0F
78
79/* 77/*
80 * EEPROM related constants, enums, and structures. 78 * EEPROM related constants, enums, and structures.
81 */ 79 */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 08b189c8472d..fc6fa2886d9c 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
804 804
805#define IWL4965_DEFAULT_TX_RETRY 15 805#define IWL4965_DEFAULT_TX_RETRY 15
806 806
807/* Limit range of txpower output target to be between these values */
808#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
809
810/* EEPROM */ 807/* EEPROM */
811#define IWL4965_FIRST_AMPDU_QUEUE 10 808#define IWL4965_FIRST_AMPDU_QUEUE 10
812 809
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 7007d61bb6b5..c1511b14b239 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
160 struct ieee80211_channel *geo_ch; 160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates; 161 struct ieee80211_rate *rates;
162 int i = 0; 162 int i = 0;
163 s8 max_tx_power = 0;
163 164
164 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
165 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
235 236
236 geo_ch->flags |= ch->ht40_extension_channel; 237 geo_ch->flags |= ch->ht40_extension_channel;
237 238
238 if (ch->max_power_avg > priv->tx_power_device_lmt) 239 if (ch->max_power_avg > max_tx_power)
239 priv->tx_power_device_lmt = ch->max_power_avg; 240 max_tx_power = ch->max_power_avg;
240 } else { 241 } else {
241 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 242 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
242 } 243 }
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
249 geo_ch->flags); 250 geo_ch->flags);
250 } 251 }
251 252
253 priv->tx_power_device_lmt = max_tx_power;
254 priv->tx_power_user_lmt = max_tx_power;
255 priv->tx_power_next = max_tx_power;
256
252 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && 257 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
253 priv->cfg->sku & IWL_SKU_A) { 258 priv->cfg->sku & IWL_SKU_A) {
254 IWL_INFO(priv, "Incorrectly detected BG card as ABG. " 259 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1124 if (!priv->cfg->ops->lib->send_tx_power) 1129 if (!priv->cfg->ops->lib->send_tx_power)
1125 return -EOPNOTSUPP; 1130 return -EOPNOTSUPP;
1126 1131
1127 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { 1132 /* 0 dBm means 1 milliwatt */
1133 if (tx_power < 0) {
1128 IWL_WARN(priv, 1134 IWL_WARN(priv,
1129 "Requested user TXPOWER %d below lower limit %d.\n", 1135 "Requested user TXPOWER %d below 1 mW.\n",
1130 tx_power, 1136 tx_power);
1131 IWL4965_TX_POWER_TARGET_POWER_MIN);
1132 return -EINVAL; 1137 return -EINVAL;
1133 } 1138 }
1134 1139
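
Taken together with the deletions in iwl-3945-hw.h, iwl-4965-hw.h, iwl-eeprom.c and the two *-base.c files below, these hunks move the TX power limits from compile-time defaults to a value computed while the geo tables are built: the largest max_power_avg of any enabled channel seeds the device, user, and next limits in one place. A condensed sketch, where for_each_enabled_channel() is a hypothetical iterator standing in for the channel loop in iwl_legacy_init_geos():

s8 max_tx_power = 0;

for_each_enabled_channel(priv, ch) {	/* hypothetical iterator */
	if (ch->max_power_avg > max_tx_power)
		max_tx_power = ch->max_power_avg;
}

priv->tx_power_device_lmt = max_tx_power;
priv->tx_power_user_lmt   = max_tx_power;
priv->tx_power_next       = max_tx_power;

iwl_legacy_set_tx_power() then only needs the fixed lower bound of 0 dBm (1 mW), so IWL4965_TX_POWER_TARGET_POWER_MIN and IWL_DEFAULT_TX_POWER can go.
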
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index 04c5648027df..cb346d1a9ffa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
471 flags & EEPROM_CHANNEL_RADAR)) 471 flags & EEPROM_CHANNEL_RADAR))
472 ? "" : "not "); 472 ? "" : "not ");
473 473
474 /* Set the tx_power_user_lmt to the highest power
475 * supported by any channel */
476 if (eeprom_ch_info[ch].max_power_avg >
477 priv->tx_power_user_lmt)
478 priv->tx_power_user_lmt =
479 eeprom_ch_info[ch].max_power_avg;
480
481 ch_info++; 474 ch_info++;
482 } 475 }
483 } 476 }
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 28eb3d885ba1..cc7ebcee60e5 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3825 priv->force_reset[IWL_FW_RESET].reset_duration = 3825 priv->force_reset[IWL_FW_RESET].reset_duration =
3826 IWL_DELAY_NEXT_FORCE_FW_RELOAD; 3826 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3827 3827
3828
3829 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3830 priv->tx_power_next = IWL_DEFAULT_TX_POWER;
3831
3832 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3828 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3833 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3829 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3834 eeprom->version); 3830 eeprom->version);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 91b3d8b9d7a5..d484c3678163 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
3140 3140
3141 iwl_legacy_init_scan_params(priv); 3141 iwl_legacy_init_scan_params(priv);
3142 3142
3143 /* Set the tx_power_user_lmt to the lowest power level
3144 * this value will get overwritten by channel max power avg
3145 * from eeprom */
3146 priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
3147 priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
3148
3149 ret = iwl_legacy_init_channel_map(priv); 3143 ret = iwl_legacy_init_channel_map(priv);
3150 if (ret) { 3144 if (ret) {
3151 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3145 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3ea31b659d1a..22e045b5bcee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
530struct iwl_cfg iwl5300_agn_cfg = { 530struct iwl_cfg iwl5300_agn_cfg = {
531 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 531 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
532 IWL_DEVICE_5000, 532 IWL_DEVICE_5000,
533 /* at least EEPROM 0x11A has wrong info */
534 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
535 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
533 .ht_params = &iwl5000_ht_params, 536 .ht_params = &iwl5000_ht_params,
534}; 537};
535 538
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 36952274950e..c1ceb4b23971 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
137struct mwl8k_priv { 137struct mwl8k_priv {
138 struct ieee80211_hw *hw; 138 struct ieee80211_hw *hw;
139 struct pci_dev *pdev; 139 struct pci_dev *pdev;
140 int irq;
140 141
141 struct mwl8k_device_info *device_info; 142 struct mwl8k_device_info *device_info;
142 143
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3761 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 3762 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
3762 IRQF_SHARED, MWL8K_NAME, hw); 3763 IRQF_SHARED, MWL8K_NAME, hw);
3763 if (rc) { 3764 if (rc) {
3765 priv->irq = -1;
3764 wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); 3766 wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
3765 return -EIO; 3767 return -EIO;
3766 } 3768 }
3769 priv->irq = priv->pdev->irq;
3767 3770
3768 /* Enable TX reclaim and RX tasklets. */ 3771 /* Enable TX reclaim and RX tasklets. */
3769 tasklet_enable(&priv->poll_tx_task); 3772 tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3800 if (rc) { 3803 if (rc) {
3801 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3804 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3802 free_irq(priv->pdev->irq, hw); 3805 free_irq(priv->pdev->irq, hw);
3806 priv->irq = -1;
3803 tasklet_disable(&priv->poll_tx_task); 3807 tasklet_disable(&priv->poll_tx_task);
3804 tasklet_disable(&priv->poll_rx_task); 3808 tasklet_disable(&priv->poll_rx_task);
3805 } 3809 }
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3818 3822
3819 /* Disable interrupts */ 3823 /* Disable interrupts */
3820 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3824 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3821 free_irq(priv->pdev->irq, hw); 3825 if (priv->irq != -1) {
3826 free_irq(priv->pdev->irq, hw);
3827 priv->irq = -1;
3828 }
3822 3829
3823 /* Stop finalize join worker */ 3830 /* Stop finalize join worker */
3824 cancel_work_sync(&priv->finalize_join_worker); 3831 cancel_work_sync(&priv->finalize_join_worker);
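
mwl8k gains an irq field so that teardown can tell whether the interrupt line is actually held: a failed request_irq() and every free_irq() park it at -1, making mwl8k_stop() safe to run even after a failed or repeated start. The bookkeeping, reduced to its two halves:

/* start path */
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
		 IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
	priv->irq = -1;			/* nothing to release later */
	return -EIO;
}
priv->irq = priv->pdev->irq;

/* stop path (and the start-path error unwind) */
if (priv->irq != -1) {
	free_irq(priv->pdev->irq, hw);
	priv->irq = -1;			/* a second stop is now a no-op */
}
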
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 7834c26c2954..042842e704de 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
703 struct p54_tx_info *p54info; 703 struct p54_tx_info *p54info;
704 struct p54_hdr *hdr; 704 struct p54_hdr *hdr;
705 struct p54_tx_data *txhdr; 705 struct p54_tx_data *txhdr;
706 unsigned int padding, len, extra_len; 706 unsigned int padding, len, extra_len = 0;
707 int i, j, ridx; 707 int i, j, ridx;
708 u16 hdr_flags = 0, aid = 0; 708 u16 hdr_flags = 0, aid = 0;
709 u8 rate, queue = 0, crypt_offset = 0; 709 u8 rate, queue = 0, crypt_offset = 0;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c8ff646c0b05..0fa466a91bf4 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -88,4 +88,6 @@ config PCI_IOAPIC
88 depends on HOTPLUG 88 depends on HOTPLUG
89 default y 89 default y
90 90
91select NLS if (DMI || ACPI) 91config PCI_LABEL
92 def_bool y if (DMI || ACPI)
93 select NLS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98d61c8e984b..c85f744270a5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
56# ACPI Related PCI FW Functions 56# ACPI Related PCI FW Functions
57# ACPI _DSM provided firmware instance and string name 57# ACPI _DSM provided firmware instance and string name
58# 58#
59obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o 59obj-$(CONFIG_ACPI) += pci-acpi.o
60 60
61# SMBIOS provided firmware instance and labels 61# SMBIOS provided firmware instance and labels
62obj-$(CONFIG_DMI) += pci-label.o 62obj-$(CONFIG_PCI_LABEL) += pci-label.o
63 63
64# Cardbus & CompactPCI use setup-bus 64# Cardbus & CompactPCI use setup-bus
65obj-$(CONFIG_HOTPLUG) += setup-bus.o 65obj-$(CONFIG_HOTPLUG) += setup-bus.o
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index de0dd7b1f146..bcae8dd41496 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
394 return 0; 394 return 0;
395 395
396fail2: 396fail2:
397 free_irq(omap_rtc_timer, NULL); 397 free_irq(omap_rtc_timer, rtc);
398fail1: 398fail1:
399 rtc_device_unregister(rtc); 399 rtc_device_unregister(rtc);
400fail0: 400fail0:
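
The rtc-omap fix is the classic free_irq() cookie bug: the dev_id handed to free_irq() must be the same pointer that was passed to request_irq(), otherwise the kernel cannot match the registration it is being asked to remove (and, on shared lines, would not know which handler to drop). A sketch of the pairing; the handler name and flags are illustrative, only the cookie matters:

ret = request_irq(omap_rtc_timer, rtc_irq_handler, 0,
		  "omap-rtc-timer", rtc);	/* cookie: rtc */
if (ret)
	goto fail1;
	/* ... */
fail2:
	free_irq(omap_rtc_timer, rtc);		/* same cookie, not NULL */
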
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce2..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
411 list_splice_init(&shost->starved_list, &starved_list); 411 list_splice_init(&shost->starved_list, &starved_list);
412 412
413 while (!list_empty(&starved_list)) { 413 while (!list_empty(&starved_list)) {
414 int flagset;
415
416 /* 414 /*
417 * As long as shost is accepting commands and we have 415 * As long as shost is accepting commands and we have
418 * starved queues, call blk_run_queue. scsi_request_fn 416 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
435 continue; 433 continue;
436 } 434 }
437 435
438 spin_unlock(shost->host_lock); 436 blk_run_queue_async(sdev->request_queue);
439
440 spin_lock(sdev->request_queue->queue_lock);
441 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
442 !test_bit(QUEUE_FLAG_REENTER,
443 &sdev->request_queue->queue_flags);
444 if (flagset)
445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 __blk_run_queue(sdev->request_queue);
447 if (flagset)
448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 spin_unlock(sdev->request_queue->queue_lock);
450
451 spin_lock(shost->host_lock);
452 } 437 }
453 /* put any unprocessed entries back */ 438 /* put any unprocessed entries back */
454 list_splice(&starved_list, &shost->starved_list); 439 list_splice(&starved_list, &shost->starved_list);
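
The dozen lines of QUEUE_FLAG_REENTER juggling collapse into one call: blk_run_queue_async() (declared in the blkdev.h hunk below) hands the queue run to a worker instead of recursing into the request_fn from inside scsi_run_queue(), so both the flag dance and the host_lock drop/retake disappear. The same substitution is made in scsi_transport_fc.c below. In caller terms:

/* old: unlock host_lock, lock queue_lock, set/clear QUEUE_FLAG_REENTER
 * around __blk_run_queue(), unlock, relock, all to avoid re-entering
 * the request_fn on the same stack.
 * new: defer the run to block-layer context; no re-entrancy possible. */
blk_run_queue_async(sdev->request_queue);
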
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 28c33506e4ad..815069d13f9b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
3816static void 3816static void
3817fc_bsg_goose_queue(struct fc_rport *rport) 3817fc_bsg_goose_queue(struct fc_rport *rport)
3818{ 3818{
3819 int flagset;
3820 unsigned long flags;
3821
3822 if (!rport->rqst_q) 3819 if (!rport->rqst_q)
3823 return; 3820 return;
3824 3821
3822 /*
3823 * This get/put dance makes no sense
3824 */
3825 get_device(&rport->dev); 3825 get_device(&rport->dev);
3826 3826 blk_run_queue_async(rport->rqst_q);
3827 spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
3828 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3830 if (flagset)
3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3832 __blk_run_queue(rport->rqst_q);
3833 if (flagset)
3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3836
3837 put_device(&rport->dev); 3827 put_device(&rport->dev);
3838} 3828}
3839 3829
3840
3841/** 3830/**
3842 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD 3831 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3843 * @q: rport request queue 3832 * @q: rport request queue
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index c71995b111bf..0f5c4f9d5d62 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
884 } 884 }
885 885
886 brelse(dibh); 886 brelse(dibh);
887 gfs2_trans_end(sdp);
888failed: 887failed:
888 gfs2_trans_end(sdp);
889 if (al) { 889 if (al) {
890 gfs2_inplace_release(ip); 890 gfs2_inplace_release(ip);
891 gfs2_quota_unlock(ip); 891 gfs2_quota_unlock(ip);
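
Moving gfs2_trans_end() below the failed: label means the transaction is closed on the error path too, not only on success; before this, a failure jumping to failed: left the transaction open. A conceptual sketch of the invariant (in GFS2 the transaction is actually opened earlier, in the write_begin path; it is shown inline here only to make the pairing visible):

int example_write_end(struct gfs2_sbd *sdp, int error)
{
	/* assume gfs2_trans_begin() succeeded before we got here */
	if (error)
		goto failed;

	/* ... success-path bookkeeping ... */
failed:
	gfs2_trans_end(sdp);	/* now reached on both paths, exactly once */
	return error;
}
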
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 5c356d09c321..f789c5732b7c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
1506 inode = gfs2_inode_lookup(dir->i_sb, 1506 inode = gfs2_inode_lookup(dir->i_sb,
1507 be16_to_cpu(dent->de_type), 1507 be16_to_cpu(dent->de_type),
1508 be64_to_cpu(dent->de_inum.no_addr), 1508 be64_to_cpu(dent->de_inum.no_addr),
1509 be64_to_cpu(dent->de_inum.no_formal_ino)); 1509 be64_to_cpu(dent->de_inum.no_formal_ino), 0);
1510 brelse(bh); 1510 brelse(bh);
1511 return inode; 1511 return inode;
1512 } 1512 }
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b2682e073eee..e48310885c48 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
617 return generic_file_aio_write(iocb, iov, nr_segs, pos); 617 return generic_file_aio_write(iocb, iov, nr_segs, pos);
618} 618}
619 619
620static void empty_write_end(struct page *page, unsigned from, 620static int empty_write_end(struct page *page, unsigned from,
621 unsigned to) 621 unsigned to, int mode)
622{ 622{
623 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 623 struct inode *inode = page->mapping->host;
624 struct gfs2_inode *ip = GFS2_I(inode);
625 struct buffer_head *bh;
626 unsigned offset, blksize = 1 << inode->i_blkbits;
627 pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
624 628
625 zero_user(page, from, to-from); 629 zero_user(page, from, to-from);
626 mark_page_accessed(page); 630 mark_page_accessed(page);
627 631
628 if (!gfs2_is_writeback(ip)) 632 if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
629 gfs2_page_add_databufs(ip, page, from, to); 633 if (!gfs2_is_writeback(ip))
634 gfs2_page_add_databufs(ip, page, from, to);
635
636 block_commit_write(page, from, to);
637 return 0;
638 }
639
640 offset = 0;
641 bh = page_buffers(page);
642 while (offset < to) {
643 if (offset >= from) {
644 set_buffer_uptodate(bh);
645 mark_buffer_dirty(bh);
646 clear_buffer_new(bh);
647 write_dirty_buffer(bh, WRITE);
648 }
649 offset += blksize;
650 bh = bh->b_this_page;
651 }
630 652
631 block_commit_write(page, from, to); 653 offset = 0;
654 bh = page_buffers(page);
655 while (offset < to) {
656 if (offset >= from) {
657 wait_on_buffer(bh);
658 if (!buffer_uptodate(bh))
659 return -EIO;
660 }
661 offset += blksize;
662 bh = bh->b_this_page;
663 }
664 return 0;
632} 665}
633 666
634static int needs_empty_write(sector_t block, struct inode *inode) 667static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
643 return !buffer_mapped(&bh_map); 676 return !buffer_mapped(&bh_map);
644} 677}
645 678
646static int write_empty_blocks(struct page *page, unsigned from, unsigned to) 679static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
680 int mode)
647{ 681{
648 struct inode *inode = page->mapping->host; 682 struct inode *inode = page->mapping->host;
649 unsigned start, end, next, blksize; 683 unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
668 gfs2_block_map); 702 gfs2_block_map);
669 if (unlikely(ret)) 703 if (unlikely(ret))
670 return ret; 704 return ret;
671 empty_write_end(page, start, end); 705 ret = empty_write_end(page, start, end, mode);
706 if (unlikely(ret))
707 return ret;
672 end = 0; 708 end = 0;
673 } 709 }
674 start = next; 710 start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
682 ret = __block_write_begin(page, start, end - start, gfs2_block_map); 718 ret = __block_write_begin(page, start, end - start, gfs2_block_map);
683 if (unlikely(ret)) 719 if (unlikely(ret))
684 return ret; 720 return ret;
685 empty_write_end(page, start, end); 721 ret = empty_write_end(page, start, end, mode);
722 if (unlikely(ret))
723 return ret;
686 } 724 }
687 725
688 return 0; 726 return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
731 769
732 if (curr == end) 770 if (curr == end)
733 to = end_offset; 771 to = end_offset;
734 error = write_empty_blocks(page, from, to); 772 error = write_empty_blocks(page, from, to, mode);
735 if (!error && offset + to > inode->i_size && 773 if (!error && offset + to > inode->i_size &&
736 !(mode & FALLOC_FL_KEEP_SIZE)) { 774 !(mode & FALLOC_FL_KEEP_SIZE)) {
737 i_size_write(inode, offset + to); 775 i_size_write(inode, offset + to);
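
empty_write_end() (used by the fallocate path) now distinguishes two cases: pages inside i_size keep the journaled route (gfs2_page_add_databufs() plus block_commit_write()), while pages beyond EOF under FALLOC_FL_KEEP_SIZE are flushed by hand, since ordinary writeback ignores pages past end-of-file. The by-hand flush is deliberately two-pass; condensed (clear_buffer_new() omitted):

/* pass 1: start I/O on every buffer in [from, to) */
for (bh = page_buffers(page), offset = 0; offset < to;
     offset += blksize, bh = bh->b_this_page)
	if (offset >= from) {
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		write_dirty_buffer(bh, WRITE);
	}

/* pass 2: wait for all of them, so the submissions overlap */
for (bh = page_buffers(page), offset = 0; offset < to;
     offset += blksize, bh = bh->b_this_page)
	if (offset >= from) {
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			return -EIO;	/* propagated up through
					 * write_empty_blocks() */
	}
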
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 3754e3cbf02b..25eeb2bcee47 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
385static void iopen_go_callback(struct gfs2_glock *gl) 385static void iopen_go_callback(struct gfs2_glock *gl)
386{ 386{
387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
388 struct gfs2_sbd *sdp = gl->gl_sbd;
389
390 if (sdp->sd_vfs->s_flags & MS_RDONLY)
391 return;
388 392
389 if (gl->gl_demote_state == LM_ST_UNLOCKED && 393 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
390 gl->gl_state == LM_ST_SHARED && ip) { 394 gl->gl_state == LM_ST_SHARED && ip) {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 97d54a28776a..9134dcb89479 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
40 u64 ir_length; 40 u64 ir_length;
41}; 41};
42 42
43struct gfs2_skip_data {
44 u64 no_addr;
45 int skipped;
46 int non_block;
47};
48
43static int iget_test(struct inode *inode, void *opaque) 49static int iget_test(struct inode *inode, void *opaque)
44{ 50{
45 struct gfs2_inode *ip = GFS2_I(inode); 51 struct gfs2_inode *ip = GFS2_I(inode);
46 u64 *no_addr = opaque; 52 struct gfs2_skip_data *data = opaque;
47 53
48 if (ip->i_no_addr == *no_addr) 54 if (ip->i_no_addr == data->no_addr) {
55 if (data->non_block &&
56 inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
57 data->skipped = 1;
58 return 0;
59 }
49 return 1; 60 return 1;
50 61 }
51 return 0; 62 return 0;
52} 63}
53 64
54static int iget_set(struct inode *inode, void *opaque) 65static int iget_set(struct inode *inode, void *opaque)
55{ 66{
56 struct gfs2_inode *ip = GFS2_I(inode); 67 struct gfs2_inode *ip = GFS2_I(inode);
57 u64 *no_addr = opaque; 68 struct gfs2_skip_data *data = opaque;
58 69
59 inode->i_ino = (unsigned long)*no_addr; 70 if (data->skipped)
60 ip->i_no_addr = *no_addr; 71 return -ENOENT;
72 inode->i_ino = (unsigned long)(data->no_addr);
73 ip->i_no_addr = data->no_addr;
61 return 0; 74 return 0;
62} 75}
63 76
64struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr) 77struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
65{ 78{
66 unsigned long hash = (unsigned long)no_addr; 79 unsigned long hash = (unsigned long)no_addr;
67 return ilookup5(sb, hash, iget_test, &no_addr); 80 struct gfs2_skip_data data;
81
82 data.no_addr = no_addr;
83 data.skipped = 0;
84 data.non_block = 0;
85 return ilookup5(sb, hash, iget_test, &data);
68} 86}
69 87
70static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) 88static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
89 int non_block)
71{ 90{
91 struct gfs2_skip_data data;
72 unsigned long hash = (unsigned long)no_addr; 92 unsigned long hash = (unsigned long)no_addr;
73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 93
94 data.no_addr = no_addr;
95 data.skipped = 0;
96 data.non_block = non_block;
97 return iget5_locked(sb, hash, iget_test, iget_set, &data);
74} 98}
75 99
76/** 100/**
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
111 * @sb: The super block 135 * @sb: The super block
112 * @no_addr: The inode number 136 * @no_addr: The inode number
113 * @type: The type of the inode 137 * @type: The type of the inode
 138 * @non_block: Can we block on inodes that are being freed?
114 * 139 *
115 * Returns: A VFS inode, or an error 140 * Returns: A VFS inode, or an error
116 */ 141 */
117 142
118struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, 143struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
119 u64 no_addr, u64 no_formal_ino) 144 u64 no_addr, u64 no_formal_ino, int non_block)
120{ 145{
121 struct inode *inode; 146 struct inode *inode;
122 struct gfs2_inode *ip; 147 struct gfs2_inode *ip;
123 struct gfs2_glock *io_gl = NULL; 148 struct gfs2_glock *io_gl = NULL;
124 int error; 149 int error;
125 150
126 inode = gfs2_iget(sb, no_addr); 151 inode = gfs2_iget(sb, no_addr, non_block);
127 ip = GFS2_I(inode); 152 ip = GFS2_I(inode);
128 153
129 if (!inode) 154 if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
185{ 210{
186 struct super_block *sb = sdp->sd_vfs; 211 struct super_block *sb = sdp->sd_vfs;
187 struct gfs2_holder i_gh; 212 struct gfs2_holder i_gh;
188 struct inode *inode; 213 struct inode *inode = NULL;
189 int error; 214 int error;
190 215
216 /* Must not read in block until block type is verified */
191 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops, 217 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
192 LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 218 LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
193 if (error) 219 if (error)
194 return ERR_PTR(error); 220 return ERR_PTR(error);
195 221
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
197 if (error) 223 if (error)
198 goto fail; 224 goto fail;
199 225
200 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0); 226 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
201 if (IS_ERR(inode)) 227 if (IS_ERR(inode))
202 goto fail; 228 goto fail;
203 229
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
843 goto fail_gunlock2; 869 goto fail_gunlock2;
844 870
845 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, 871 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
846 inum.no_formal_ino); 872 inum.no_formal_ino, 0);
847 if (IS_ERR(inode)) 873 if (IS_ERR(inode))
848 goto fail_gunlock2; 874 goto fail_gunlock2;
849 875
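
The inode.c rework threads a non_block flag through the inode-cache lookup via the new gfs2_skip_data cookie. The subtle part is how the skip propagates: iget_test() runs under the inode hash lock, and when it meets an inode in I_FREEING/I_CLEAR/I_WILL_FREE it records data->skipped instead of matching; the lookup then falls through to allocating a new inode, where iget_set() sees skipped and fails with -ENOENT, so iget5_locked() returns NULL rather than blocking on the dying inode. Caller-side sketch (the retry policy is hypothetical):

struct gfs2_skip_data data = {
	.no_addr   = no_addr,
	.skipped   = 0,
	.non_block = 1,		/* do not wait on inodes being freed */
};
struct inode *inode;

inode = iget5_locked(sb, (unsigned long)no_addr,
		     iget_test, iget_set, &data);
if (!inode)
	return ERR_PTR(-ENOENT);	/* raced with eviction */

gfs2_lookup_by_inum() pairs this with taking the glock LM_ST_EXCLUSIVE with GL_SKIP, so the block type is verified before the inode block is ever read.
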
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 3e00a66e7cbd..099ca305e518 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -97,7 +97,8 @@ err:
97} 97}
98 98
99extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 99extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
100 u64 no_addr, u64 no_formal_ino); 100 u64 no_addr, u64 no_formal_ino,
101 int non_block);
101extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, 102extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
102 u64 *no_formal_ino, 103 u64 *no_formal_ino,
103 unsigned int blktype); 104 unsigned int blktype);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 42ef24355afb..d3c69eb91c74 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
430 struct dentry *dentry; 430 struct dentry *dentry;
431 struct inode *inode; 431 struct inode *inode;
432 432
433 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0); 433 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
434 if (IS_ERR(inode)) { 434 if (IS_ERR(inode)) {
435 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); 435 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
436 return PTR_ERR(inode); 436 return PTR_ERR(inode);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cf930cd9664a..6fcae8469f6d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
945 /* rgblk_search can return a block < goal, so we need to 945 /* rgblk_search can return a block < goal, so we need to
946 keep it marching forward. */ 946 keep it marching forward. */
947 no_addr = block + rgd->rd_data0; 947 no_addr = block + rgd->rd_data0;
948 goal++; 948 goal = max(block + 1, goal + 1);
949 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked) 949 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
950 continue; 950 continue;
951 if (no_addr == skip) 951 if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
971 found++; 971 found++;
972 972
973 /* Limit reclaim to sensible number of tasks */ 973 /* Limit reclaim to sensible number of tasks */
974 if (found > 2*NR_CPUS) 974 if (found > NR_CPUS)
975 return; 975 return;
976 } 976 }
977 977
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a4e23d68a398..b9f28e66dad1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1318 1318
1319static void gfs2_evict_inode(struct inode *inode) 1319static void gfs2_evict_inode(struct inode *inode)
1320{ 1320{
1321 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 1321 struct super_block *sb = inode->i_sb;
1322 struct gfs2_sbd *sdp = sb->s_fs_info;
1322 struct gfs2_inode *ip = GFS2_I(inode); 1323 struct gfs2_inode *ip = GFS2_I(inode);
1323 struct gfs2_holder gh; 1324 struct gfs2_holder gh;
1324 int error; 1325 int error;
1325 1326
1326 if (inode->i_nlink) 1327 if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
1327 goto out; 1328 goto out;
1328 1329
1329 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1330 /* Must not read inode block until block type has been verified */
1331 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
1330 if (unlikely(error)) { 1332 if (unlikely(error)) {
1331 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1333 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1332 goto out; 1334 goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
1336 if (error) 1338 if (error)
1337 goto out_truncate; 1339 goto out_truncate;
1338 1340
1341 if (test_bit(GIF_INVALID, &ip->i_flags)) {
1342 error = gfs2_inode_refresh(ip);
1343 if (error)
1344 goto out_truncate;
1345 }
1346
1339 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; 1347 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1340 gfs2_glock_dq_wait(&ip->i_iopen_gh); 1348 gfs2_glock_dq_wait(&ip->i_iopen_gh);
1341 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); 1349 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
388#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 388#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
389#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 389#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
390#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 390#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
391#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 391#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
392#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ 392#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
393#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ 393#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
394#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ 394#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
395#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ 395#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
396#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ 396#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
397#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ 397#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
398#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
399#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 398#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
400#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 399#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
401#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 400#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
402#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ 401#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
403#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ 402#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
404#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ 403#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
405 404
406#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 405#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
407 (1 << QUEUE_FLAG_STACKABLE) | \ 406 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
699extern void __blk_stop_queue(struct request_queue *q); 698extern void __blk_stop_queue(struct request_queue *q);
700extern void __blk_run_queue(struct request_queue *q); 699extern void __blk_run_queue(struct request_queue *q);
701extern void blk_run_queue(struct request_queue *); 700extern void blk_run_queue(struct request_queue *);
701extern void blk_run_queue_async(struct request_queue *q);
702extern int blk_rq_map_user(struct request_queue *, struct request *, 702extern int blk_rq_map_user(struct request_queue *, struct request *,
703 struct rq_map_data *, void __user *, unsigned long, 703 struct rq_map_data *, void __user *, unsigned long,
704 gfp_t); 704 gfp_t);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 369e19d3750b..7f1183dcd119 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -24,6 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/poll.h> 25#include <linux/poll.h>
26#include <linux/posix-timers.h> 26#include <linux/posix-timers.h>
27#include <linux/rwsem.h>
27 28
28struct posix_clock; 29struct posix_clock;
29 30
@@ -104,7 +105,7 @@ struct posix_clock_operations {
104 * @ops: Functional interface to the clock 105 * @ops: Functional interface to the clock
105 * @cdev: Character device instance for this clock 106 * @cdev: Character device instance for this clock
106 * @kref: Reference count. 107 * @kref: Reference count.
107 * @mutex: Protects the 'zombie' field from concurrent access. 108 * @rwsem: Protects the 'zombie' field from concurrent access.
108 * @zombie: If 'zombie' is true, then the hardware has disappeared. 109 * @zombie: If 'zombie' is true, then the hardware has disappeared.
109 * @release: A function to free the structure when the reference count reaches 110 * @release: A function to free the structure when the reference count reaches
110 * zero. May be NULL if structure is statically allocated. 111 * zero. May be NULL if structure is statically allocated.
@@ -117,7 +118,7 @@ struct posix_clock {
117 struct posix_clock_operations ops; 118 struct posix_clock_operations ops;
118 struct cdev cdev; 119 struct cdev cdev;
119 struct kref kref; 120 struct kref kref;
120 struct mutex mutex; 121 struct rw_semaphore rwsem;
121 bool zombie; 122 bool zombie;
122 void (*release)(struct posix_clock *clk); 123 void (*release)(struct posix_clock *clk);
123}; 124};
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 3c7329b8ea0e..0e1855079fbb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -103,8 +103,8 @@ struct driver_info {
103 * Indicates to usbnet, that USB driver accumulates multiple IP packets. 103 * Indicates to usbnet, that USB driver accumulates multiple IP packets.
104 * Affects statistic (counters) and short packet handling. 104 * Affects statistic (counters) and short packet handling.
105 */ 105 */
106#define FLAG_MULTI_PACKET 0x1000 106#define FLAG_MULTI_PACKET 0x2000
107#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */ 107#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
108 108
109 /* init device ... can sleep, or cause probe() failure */ 109 /* init device ... can sleep, or cause probe() failure */
110 int (*bind)(struct usbnet *, struct usb_interface *); 110 int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 25028dd4fa18..c340ca658f37 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -19,7 +19,6 @@
19 */ 19 */
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/file.h> 21#include <linux/file.h>
22#include <linux/mutex.h>
23#include <linux/posix-clock.h> 22#include <linux/posix-clock.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/syscalls.h> 24#include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
34{ 33{
35 struct posix_clock *clk = fp->private_data; 34 struct posix_clock *clk = fp->private_data;
36 35
37 mutex_lock(&clk->mutex); 36 down_read(&clk->rwsem);
38 37
39 if (!clk->zombie) 38 if (!clk->zombie)
40 return clk; 39 return clk;
41 40
42 mutex_unlock(&clk->mutex); 41 up_read(&clk->rwsem);
43 42
44 return NULL; 43 return NULL;
45} 44}
46 45
47static void put_posix_clock(struct posix_clock *clk) 46static void put_posix_clock(struct posix_clock *clk)
48{ 47{
49 mutex_unlock(&clk->mutex); 48 up_read(&clk->rwsem);
50} 49}
51 50
52static ssize_t posix_clock_read(struct file *fp, char __user *buf, 51static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
156 struct posix_clock *clk = 155 struct posix_clock *clk =
157 container_of(inode->i_cdev, struct posix_clock, cdev); 156 container_of(inode->i_cdev, struct posix_clock, cdev);
158 157
159 mutex_lock(&clk->mutex); 158 down_read(&clk->rwsem);
160 159
161 if (clk->zombie) { 160 if (clk->zombie) {
162 err = -ENODEV; 161 err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
172 fp->private_data = clk; 171 fp->private_data = clk;
173 } 172 }
174out: 173out:
175 mutex_unlock(&clk->mutex); 174 up_read(&clk->rwsem);
176 return err; 175 return err;
177} 176}
178 177
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
211 int err; 210 int err;
212 211
213 kref_init(&clk->kref); 212 kref_init(&clk->kref);
214 mutex_init(&clk->mutex); 213 init_rwsem(&clk->rwsem);
215 214
216 cdev_init(&clk->cdev, &posix_clock_file_operations); 215 cdev_init(&clk->cdev, &posix_clock_file_operations);
217 clk->cdev.owner = clk->ops.owner; 216 clk->cdev.owner = clk->ops.owner;
218 err = cdev_add(&clk->cdev, devid, 1); 217 err = cdev_add(&clk->cdev, devid, 1);
219 if (err)
220 goto no_cdev;
221 218
222 return err; 219 return err;
223no_cdev:
224 mutex_destroy(&clk->mutex);
225 return err;
226} 220}
227EXPORT_SYMBOL_GPL(posix_clock_register); 221EXPORT_SYMBOL_GPL(posix_clock_register);
228 222
229static void delete_clock(struct kref *kref) 223static void delete_clock(struct kref *kref)
230{ 224{
231 struct posix_clock *clk = container_of(kref, struct posix_clock, kref); 225 struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
232 mutex_destroy(&clk->mutex); 226
233 if (clk->release) 227 if (clk->release)
234 clk->release(clk); 228 clk->release(clk);
235} 229}
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
238{ 232{
239 cdev_del(&clk->cdev); 233 cdev_del(&clk->cdev);
240 234
241 mutex_lock(&clk->mutex); 235 down_write(&clk->rwsem);
242 clk->zombie = true; 236 clk->zombie = true;
243 mutex_unlock(&clk->mutex); 237 up_write(&clk->rwsem);
244 238
245 kref_put(&clk->kref, delete_clock); 239 kref_put(&clk->kref, delete_clock);
246} 240}
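
The posix-clock conversion from mutex to rw_semaphore changes the guard's shape, not just its type: concurrent clock operations may now proceed in parallel on the read side, get_posix_clock() returns with the read lock still held, put_posix_clock() drops it, and posix_clock_unregister() flips zombie under the write side, so it cannot complete while any operation is mid-flight. Consolidated from the hunks above:

static struct posix_clock *get_posix_clock(struct file *fp)
{
	struct posix_clock *clk = fp->private_data;

	down_read(&clk->rwsem);
	if (!clk->zombie)
		return clk;	/* read side stays held until
				 * put_posix_clock() releases it */
	up_read(&clk->rwsem);
	return NULL;
}

void posix_clock_unregister(struct posix_clock *clk)
{
	cdev_del(&clk->cdev);

	down_write(&clk->rwsem);	/* waits out all readers */
	clk->zombie = true;
	up_write(&clk->rwsem);

	kref_put(&clk->kref, delete_clock);
}
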
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eecf..f3bc322c5891 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
249 goto drop; 249 goto drop;
250 } 250 }
251 251
252 /* Zero out the CB buffer if no options present */ 252 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
253 if (iph->ihl == 5) { 253 if (iph->ihl == 5)
254 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
255 return 0; 254 return 0;
256 }
257 255
258 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 256 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
259 if (ip_options_compile(dev_net(dev), opt, skb)) 257 if (ip_options_compile(dev_net(dev), opt, skb))
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b8..054fdb5aeb88 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
13#include <net/caif/cfsrvl.h> 13#include <net/caif/cfsrvl.h>
14#include <net/caif/cfpkt.h> 14#include <net/caif/cfpkt.h>
15 15
16
16#define container_obj(layr) ((struct cfsrvl *) layr) 17#define container_obj(layr) ((struct cfsrvl *) layr)
17 18
18#define DGM_CMD_BIT 0x80 19#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
83 84
84static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) 85static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
85{ 86{
87 u8 packet_type;
86 u32 zero = 0; 88 u32 zero = 0;
87 struct caif_payload_info *info; 89 struct caif_payload_info *info;
88 struct cfsrvl *service = container_obj(layr); 90 struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
94 if (cfpkt_getlen(pkt) > DGM_MTU) 96 if (cfpkt_getlen(pkt) > DGM_MTU)
95 return -EMSGSIZE; 97 return -EMSGSIZE;
96 98
97 cfpkt_add_head(pkt, &zero, 4); 99 cfpkt_add_head(pkt, &zero, 3);
100 packet_type = 0x08; /* B9 set - UNCLASSIFIED */
101 cfpkt_add_head(pkt, &packet_type, 1);
98 102
99 /* Add info for MUX-layer to route the packet out. */ 103 /* Add info for MUX-layer to route the packet out. */
100 info = cfpkt_info(pkt); 104 info = cfpkt_info(pkt);
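
The cfdgml change splits the former 4-byte all-zero datagram header into 3 zero bytes plus an explicit packet-type byte (0x08, the "B9 set - UNCLASSIFIED" of the comment). Since cfpkt_add_head() prepends, the type byte is added last so it ends up first on the wire:

u8  packet_type = 0x08;	/* B9 set - UNCLASSIFIED */
u32 zero = 0;

cfpkt_add_head(pkt, &zero, 3);		/* header bytes 1-3: zero */
cfpkt_add_head(pkt, &packet_type, 1);	/* header byte  0: type */
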
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e0478..24f1ffa74b06 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
244 int phyid) 244 int phyid)
245{ 245{
246 struct cfmuxl *muxl = container_obj(layr); 246 struct cfmuxl *muxl = container_obj(layr);
247 struct list_head *node; 247 struct list_head *node, *next;
248 struct cflayer *layer; 248 struct cflayer *layer;
249 list_for_each(node, &muxl->srvl_list) { 249 list_for_each_safe(node, next, &muxl->srvl_list) {
250 layer = list_entry(node, struct cflayer, node); 250 layer = list_entry(node, struct cflayer, node);
251 if (cfsrvl_phyid_match(layer, phyid)) 251 if (cfsrvl_phyid_match(layer, phyid))
252 layer->ctrlcmd(layer, ctrl, phyid); 252 layer->ctrlcmd(layer, ctrl, phyid);
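
cfmuxl_ctrlcmd() switches to list_for_each_safe() because the layer->ctrlcmd() callback may unlink the current layer from srvl_list, which would poison a plain iterator's next pointer. The safe variant caches next before the callback runs, so deleting the current node cannot derail the walk:

struct list_head *node, *next;
struct cflayer *layer;

list_for_each_safe(node, next, &muxl->srvl_list) {
	layer = list_entry(node, struct cflayer, node);
	if (cfsrvl_phyid_match(layer, phyid))
		layer->ctrlcmd(layer, ctrl, phyid);	/* may remove
							 * 'layer' */
}
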
diff --git a/net/core/dev.c b/net/core/dev.c
index 956d3b006e8b..c2ac599fa0f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5203 } 5203 }
5204 5204
5205 /* TSO requires that SG is present as well. */ 5205 /* TSO requires that SG is present as well. */
5206 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { 5206 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5207 netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); 5207 netdev_info(dev, "Dropping TSO features since no SG feature.\n");
5208 features &= ~NETIF_F_TSO; 5208 features &= ~NETIF_F_ALL_TSO;
5209 } 5209 }
5210 5210
5211 /* TSO ECN requires that TSO is present as well. */
5212 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5213 features &= ~NETIF_F_TSO_ECN;
5214
5211 /* Software GSO depends on SG. */ 5215 /* Software GSO depends on SG. */
5212 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5216 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5213 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5217 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d33582859..5761185f884e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3af_802154-y := af_ieee802154.o raw.o dgram.o
4
5ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7d..38f23e721b80 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 !sk2->sk_bound_dev_if || 73 !sk2->sk_bound_dev_if ||
74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
75 if (!reuse || !sk2->sk_reuse || 75 if (!reuse || !sk2->sk_reuse ||
76 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { 76 sk2->sk_state == TCP_LISTEN) {
77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 79 sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 122 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 123 smallest_size = tb->num_owners;
124 smallest_rover = rover; 124 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && 125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
126 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
127 spin_unlock(&head->lock); 126 spin_unlock(&head->lock);
128 snum = smallest_rover; 127 snum = smallest_rover;
129 goto have_snum; 128 goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a2..9df4e635fb5f 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
354} 354}
355 355
356/* May be called with local BH enabled. */ 356/* May be called with local BH enabled. */
357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) 357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
358 struct inet_peer __rcu **stack[PEER_MAXDEPTH])
358{ 359{
359 int do_free; 360 int do_free;
360 361
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
368 * We use refcnt=-1 to alert lockless readers this entry is deleted. 369 * We use refcnt=-1 to alert lockless readers this entry is deleted.
369 */ 370 */
370 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 371 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
371 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
372 struct inet_peer __rcu ***stackptr, ***delp; 372 struct inet_peer __rcu ***stackptr, ***delp;
373 if (lookup(&p->daddr, stack, base) != p) 373 if (lookup(&p->daddr, stack, base) != p)
374 BUG(); 374 BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
422} 422}
423 423
424/* May be called with local BH enabled. */ 424/* May be called with local BH enabled. */
425static int cleanup_once(unsigned long ttl) 425static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
426{ 426{
427 struct inet_peer *p = NULL; 427 struct inet_peer *p = NULL;
428 428
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
454 * happen because of entry limits in route cache. */ 454 * happen because of entry limits in route cache. */
455 return -1; 455 return -1;
456 456
457 unlink_from_pool(p, peer_to_base(p)); 457 unlink_from_pool(p, peer_to_base(p), stack);
458 return 0; 458 return 0;
459} 459}
460 460
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
524 524
525 if (base->total >= inet_peer_threshold) 525 if (base->total >= inet_peer_threshold)
526 /* Remove one less-recently-used entry. */ 526 /* Remove one less-recently-used entry. */
527 cleanup_once(0); 527 cleanup_once(0, stack);
528 528
529 return p; 529 return p;
530} 530}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
540{ 540{
541 unsigned long now = jiffies; 541 unsigned long now = jiffies;
542 int ttl, total; 542 int ttl, total;
543 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
543 544
544 total = compute_total(); 545 total = compute_total();
545 if (total >= inet_peer_threshold) 546 if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
548 ttl = inet_peer_maxttl 549 ttl = inet_peer_maxttl
549 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 550 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
550 total / inet_peer_threshold * HZ; 551 total / inet_peer_threshold * HZ;
551 while (!cleanup_once(ttl)) { 552 while (!cleanup_once(ttl, stack)) {
552 if (jiffies != now) 553 if (jiffies != now)
553 break; 554 break;
554 } 555 }
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442f..2391b24e8251 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
329 pp_ptr = optptr + 2; 329 pp_ptr = optptr + 2;
330 goto error; 330 goto error;
331 } 331 }
332 if (skb) { 332 if (rt) {
333 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 333 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
334 opt->is_changed = 1; 334 opt->is_changed = 1;
335 } 335 }
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
371 goto error; 371 goto error;
372 } 372 }
373 opt->ts = optptr - iph; 373 opt->ts = optptr - iph;
374 if (skb) { 374 if (rt) {
375 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 375 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
376 timeptr = (__be32*)&optptr[optptr[2]+3]; 376 timeptr = (__be32*)&optptr[optptr[2]+3];
377 } 377 }
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
603 unsigned long orefdst; 603 unsigned long orefdst;
604 int err; 604 int err;
605 605
606 if (!opt->srr) 606 if (!opt->srr || !rt)
607 return 0; 607 return 0;
608 608
609 if (skb->pkt_type != PACKET_HOST) 609 if (skb->pkt_type != PACKET_HOST)
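
All three ip_options.c hunks are the same NULL-route fix: the guarded blocks dereference rt (rt->rt_spec_dst), and a non-NULL skb does not guarantee a route is attached to it, so the test moves from skb to rt (with ip_options_rcv_srr() now also bailing out early). The dereference the check actually protects, sketched:

struct rtable *rt = skb ? skb_rtable(skb) : NULL;	/* may be NULL
							 * even for a
							 * valid skb */

if (rt) {	/* was: if (skb) */
	memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
	opt->is_changed = 1;
}
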
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086b..321e6e84dbcc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
311 .mode = 0644, 311 .mode = 0644,
312 .proc_handler = proc_do_large_bitmap, 312 .proc_handler = proc_do_large_bitmap,
313 }, 313 },
314#ifdef CONFIG_IP_MULTICAST
315 { 314 {
316 .procname = "igmp_max_memberships", 315 .procname = "igmp_max_memberships",
317 .data = &sysctl_igmp_max_memberships, 316 .data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
319 .mode = 0644, 318 .mode = 0644,
320 .proc_handler = proc_dointvec 319 .proc_handler = proc_dointvec
321 }, 320 },
322
323#endif
324 { 321 {
325 .procname = "igmp_max_msf", 322 .procname = "igmp_max_msf",
326 .data = &sysctl_igmp_max_msf, 323 .data = &sysctl_igmp_max_msf,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 166054650466..f2c5b0fc0f21 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
44 !sk2->sk_bound_dev_if || 44 !sk2->sk_bound_dev_if ||
45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
46 (!sk->sk_reuse || !sk2->sk_reuse || 46 (!sk->sk_reuse || !sk2->sk_reuse ||
47 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && 47 sk2->sk_state == TCP_LISTEN) &&
48 ipv6_rcv_saddr_equal(sk, sk2)) 48 ipv6_rcv_saddr_equal(sk, sk2))
49 break; 49 break;
50 } 50 }
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4c..cc616974a447 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1297 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ 1297 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
1298 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | 1298 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
1299 MSG_NOSIGNAL)) { 1299 MSG_NOSIGNAL)) {
1300 err = -EINVAL; 1300 return -EINVAL;
1301 goto out;
1302 } 1301 }
1303 1302
1304 lock_sock(sk); 1303 lock_sock(sk);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a9128..903242111317 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
121 s32 data_size = ntohs(pdulen) - llc_len; 121 s32 data_size = ntohs(pdulen) - llc_len;
122 122
123 if (data_size < 0 || 123 if (data_size < 0 ||
124 ((skb_tail_pointer(skb) - 124 !pskb_may_pull(skb, data_size))
125 (u8 *)pdu) - llc_len) < data_size)
126 return 0; 125 return 0;
127 if (unlikely(pskb_trim_rcsum(skb, data_size))) 126 if (unlikely(pskb_trim_rcsum(skb, data_size)))
128 return 0; 127 return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 00a33242e90c..a274300b6a56 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct ipmac data;
 
+	/* MAC can be src only */
+	if (!(flags & IPSET_DIM_TWO_SRC))
+		return 0;
+
 	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
 	if (data.id < map->first_ip || data.id > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 9152e69a162d..72d1ac611fdc 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	if (cb->args[1] >= ip_set_max)
 		goto out;
 
-	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	for (; cb->args[1] < max; cb->args[1]++) {
 		index = (ip_set_id_t) cb->args[1];
 		set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 		 * so that lists (unions of sets) are dumped last.
 		 */
 		if (cb->args[0] != DUMP_ONE &&
-		    !((cb->args[0] == DUMP_ALL) ^
-		      (set->type->features & IPSET_DUMP_LAST)))
+		    ((cb->args[0] == DUMP_ALL) ==
+		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
 		if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 			goto release_refcount;
 		}
 	}
+	/* If we dump all sets, continue with dumping last ones */
+	if (cb->args[0] == DUMP_ALL) {
+		cb->args[0] = DUMP_LAST;
+		cb->args[1] = 0;
+		goto dump_last;
+	}
 	goto out;
 
 nla_put_failure:
@@ -1093,11 +1100,6 @@ release_refcount:
 		pr_debug("release set %s\n", ip_set_list[index]->name);
 		ip_set_put_byindex(index);
 	}
-
-	/* If we dump all sets, continue with dumping last ones */
-	if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
-		cb->args[0] = DUMP_LAST;
-
 out:
 	if (nlh) {
 		nlmsg_end(skb, nlh);
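
The rewritten skip test in ip_set_dump_start() replaces a boolean XOR against a raw feature mask with a comparison of normalized values: IPSET_DUMP_LAST is a multi-bit flag, so `features & IPSET_DUMP_LAST` yields the flag value rather than 0/1, and XOR-ing that with a 0/1 comparison result misclassifies exactly the case the test was meant to catch. A standalone demonstration, with a made-up 0x8 flag standing in for IPSET_DUMP_LAST:

#include <stdio.h>

#define DUMP_LAST_FLAG 0x8	/* hypothetical stand-in for IPSET_DUMP_LAST */

int main(void)
{
	int is_dump_all = 1;		/* this pass is DUMP_ALL       */
	int features = DUMP_LAST_FLAG;	/* set wants to be dumped last */

	/* old test: 1 ^ 0x8 == 0x9, so !(...) is 0 and the set is NOT skipped */
	int old_skip = !(is_dump_all ^ (features & DUMP_LAST_FLAG));
	/* new test: compare booleans after !! normalization -> correctly skips */
	int new_skip = is_dump_all == !!(features & DUMP_LAST_FLAG);

	printf("old_skip=%d new_skip=%d\n", old_skip, new_skip);	/* 0 vs 1 */
	return 0;
}
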
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 061d48cec137..b3babaed7719 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
 	if (info->del_set.index != IPSET_INVALID_ID)
 		ip_set_del(info->del_set.index,
 			   skb, par->family,
-			   info->add_set.dim,
+			   info->del_set.dim,
 			   info->del_set.flags);
 
 	return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
 	if (info->add_set.dim > IPSET_DIM_MAX ||
-	    info->del_set.flags > IPSET_DIM_MAX) {
+	    info->del_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
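
Every xt_set.c checkentry hunk above applies the same rule: once ip_set_nfnl_get_byindex() has taken a reference on a set, any later error return must drop that reference with ip_set_nfnl_put(), or the set can never be destroyed. A compilable sketch of the shape, with hypothetical acquire/release helpers in place of the real ipset API:

#include <errno.h>

#define INVALID_ID	(-1)
#define DIM_MAX		3

struct set_info { int add_idx, del_idx, dim; };

/* hypothetical stand-ins for ip_set_nfnl_get_byindex()/ip_set_nfnl_put() */
static int acquire_set(int id) { return id; }
static void release_set(int id) { (void)id; }

static int checkentry_sketch(struct set_info *info, int add_id, int del_id)
{
	info->add_idx = acquire_set(add_id);
	if (info->add_idx == INVALID_ID)
		return -ENOENT;			/* nothing held yet */

	info->del_idx = acquire_set(del_id);
	if (info->del_idx == INVALID_ID) {
		release_set(info->add_idx);	/* drop the reference we hold */
		return -ENOENT;
	}

	if (info->dim > DIM_MAX) {		/* late validation failure */
		release_set(info->add_idx);	/* drop both references */
		release_set(info->del_idx);
		return -ERANGE;
	}
	return 0;
}

int main(void)
{
	struct set_info info = { .dim = 5 };	/* forces the -ERANGE path */
	return checkentry_sketch(&info, 1, 2) == -ERANGE ? 0 : 1;
}
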
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0698cad61763..1a21c571aa03 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
 		sctp_assoc_set_primary(asoc, transport);
 	if (asoc->peer.active_path == peer)
 		asoc->peer.active_path = transport;
+	if (asoc->peer.retran_path == peer)
+		asoc->peer.retran_path = transport;
 	if (asoc->peer.last_data_from == peer)
 		asoc->peer.last_data_from = transport;
 
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
 	if (t)
 		asoc->peer.retran_path = t;
+	else
+		t = asoc->peer.retran_path;
 
 	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
 				 " %p addr: ",
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 17d1dcb3c667..416538248a4b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
 	struct perf_event_attr *attr = &evsel->attr;
 	int track = !evsel->idx; /* only the first counter needs these */
 
+	attr->inherit = !no_inherit;
 	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 			    PERF_FORMAT_TOTAL_TIME_RUNNING |
 			    PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos;
 
+	if (evlist->cpus->map[0] < 0)
+		no_inherit = true;
+
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct perf_event_attr *attr = &pos->attr;
 		/*
@@ -271,8 +275,7 @@ static void open_counters(struct perf_evlist *evlist)
 retry_sample_id:
 		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
-				     !no_inherit) < 0) {
+		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e2109f9b43eb..03f0e45f1479 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -167,16 +167,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
 	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 			    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
+	attr->inherit = !no_inherit;
+
 	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
 
-	attr->inherit = !no_inherit;
 	if (target_pid == -1 && target_tid == -1) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+	return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
 }
 
 /*
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1b2106c58f66..11e3c8458362 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
 	}
 
 	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-		pr_debug("perf_evsel__open_read_on_cpu\n");
+		pr_debug("perf_evsel__read_on_cpu\n");
 		goto out_close_fd;
 	}
 
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 			continue;
 
 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-			pr_debug("perf_evsel__open_read_on_cpu\n");
+			pr_debug("perf_evsel__read_on_cpu\n");
 			err = -1;
 			break;
 		}
@@ -529,7 +529,7 @@ static int test__basic_mmap(void)
 
 		perf_evlist__add(evlist, evsels[i]);
 
-		if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
 			pr_debug("failed to open counter: %s, "
 				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 				 strerror(errno));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fc1273e976c5..7e3d6e310bf8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -845,9 +845,10 @@ static void start_counters(struct perf_evlist *evlist)
 		}
 
 		attr->mmap = 1;
+		attr->inherit = inherit;
 try_again:
 		if (perf_evsel__open(counter, top.evlist->cpus,
-				     top.evlist->threads, group, inherit) < 0) {
+				     top.evlist->threads, group) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d852cefa20de..45da8d186b49 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,6 +12,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
+#include "debug.h"
 
 #include <sys/mman.h>
 
@@ -250,15 +251,19 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-			       int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+			       int cpu, int prot, int mask, int fd)
 {
 	evlist->mmap[cpu].prev = 0;
 	evlist->mmap[cpu].mask = mask;
 	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[cpu].base == MAP_FAILED)
+	if (evlist->mmap[cpu].base == MAP_FAILED) {
+		if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+			ui__warning("Inherit is not allowed on per-task "
+				    "events using mmap.\n");
 		return -1;
+	}
 
 	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
@@ -312,7 +317,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
 				  FD(first_evsel, cpu, 0)) != 0)
 				goto out_unmap;
-		} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+		} else if (__perf_evlist__mmap(evlist, evsel, cpu,
+					       prot, mask, fd) < 0)
 			goto out_unmap;
 
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 662596afd7f1..d6fd59beb860 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -175,7 +175,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
175} 175}
176 176
177static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 177static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
178 struct thread_map *threads, bool group, bool inherit) 178 struct thread_map *threads, bool group)
179{ 179{
180 int cpu, thread; 180 int cpu, thread;
181 unsigned long flags = 0; 181 unsigned long flags = 0;
@@ -192,19 +192,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
 		int group_fd = -1;
-		/*
-		 * Don't allow mmap() of inherited per-task counters. This
-		 * would create a performance issue due to all children writing
-		 * to the same buffer.
-		 *
-		 * FIXME:
-		 * Proper fix is not to pass 'inherit' to perf_evsel__open*,
-		 * but a 'flags' parameter, with 'group' folded there as well,
-		 * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
-		 * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
-		 * set. Lets go for the minimal fix first tho.
-		 */
-		evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
 
 		for (thread = 0; thread < threads->nr; thread++) {
 
@@ -253,7 +240,7 @@ static struct {
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group, bool inherit)
+		     struct thread_map *threads, bool group)
 {
 	if (cpus == NULL) {
 		/* Work around old compiler warnings about strict aliasing */
@@ -263,19 +250,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 	if (threads == NULL)
 		threads = &empty_thread_map.map;
 
-	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+	return __perf_evsel__open(evsel, cpus, threads, group);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group, bool inherit)
+			     struct cpu_map *cpus, bool group)
 {
-	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group, bool inherit)
+				struct thread_map *threads, bool group)
 {
-	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6710ab538342..f79bb2c09a6c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group, bool inherit);
+			     struct cpu_map *cpus, bool group);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group, bool inherit);
+				struct thread_map *threads, bool group);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group, bool inherit);
+		     struct thread_map *threads, bool group);
 
 #define perf_evsel__match(evsel, t, c) \
 	(evsel->attr.type == PERF_TYPE_##t && \
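
The tools/perf hunks above all serve one interface cleanup: the inherit decision leaves the perf_evsel__open*() signatures and lives on struct perf_event_attr itself, with the mmap-versus-inherit warning moved into the evlist mmap path. The resulting call shape, as a fragment (the surrounding tools/perf types and setup are assumed, not shown):

	/* caller decides inheritance up front, on the attr itself ... */
	evsel->attr.inherit = !no_inherit;

	/* ... then opens with the narrower four-argument signature */
	if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
		pr_debug("failed to open counter: %s\n", strerror(errno));
		return -1;
	}
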
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a9f2d7e1204d..f5e38451fdc5 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	struct cpu_map *cpus = NULL;
 	struct thread_map *threads = NULL;
 	PyObject *pcpus = NULL, *pthreads = NULL;
-	int group = 0, overwrite = 0;
-	static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+	int group = 0, inherit = 0;
+	static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
-					 &pcpus, &pthreads, &group, &overwrite))
+					 &pcpus, &pthreads, &group, &inherit))
 		return NULL;
 
 	if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	if (pcpus != NULL)
 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
 
-	if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+	evsel->attr.inherit = inherit;
+	if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;
 	}
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 8c17a8730e4a..15633d608133 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -256,10 +256,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 			 int refresh)
 {
 	struct objdump_line *pos, *n;
-	struct annotation *notes = symbol__annotation(sym);
+	struct annotation *notes;
 	struct annotate_browser browser = {
 		.b = {
-			.entries = &notes->src->source,
 			.refresh = ui_browser__list_head_refresh,
 			.seek	 = ui_browser__list_head_seek,
 			.write	 = annotate_browser__write,
@@ -281,6 +280,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 
 	ui_helpline__push("Press <- or ESC to exit");
 
+	notes = symbol__annotation(sym);
+
 	list_for_each_entry(pos, &notes->src->source, node) {
 		struct objdump_line_rb_node *rbpos;
 		size_t line_len = strlen(pos->line);
@@ -291,6 +292,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 		rbpos->idx = browser.b.nr_entries++;
 	}
 
+	browser.b.entries = &notes->src->source,
 	browser.b.width += 18; /* Percentage */
 	ret = annotate_browser__run(&browser, evidx, refresh);
 	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 798efdca3ead..5d767c622dfc 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
851 goto out_free_stack; 851 goto out_free_stack;
852 case 'a': 852 case 'a':
853 if (browser->selection == NULL || 853 if (browser->selection == NULL ||
854 browser->selection->map == NULL || 854 browser->selection->sym == NULL ||
855 browser->selection->map->dso->annotate_warned) 855 browser->selection->map->dso->annotate_warned)
856 continue; 856 continue;
857 goto do_annotate; 857 goto do_annotate;