aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/include/asm/page.h7
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h151
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c9
-rw-r--r--arch/x86/kernel/mpparse.c16
-rw-r--r--arch/x86/kernel/tlb_uv.c756
-rw-r--r--arch/x86/lib/atomic64_386_32.S238
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/oprofile/nmi_int.c1
-rw-r--r--drivers/isdn/hardware/avm/c4.c1
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c5
-rw-r--r--drivers/media/IR/Kconfig9
-rw-r--r--drivers/media/dvb/dm1105/Kconfig2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig2
-rw-r--r--drivers/media/dvb/siano/Kconfig2
-rw-r--r--drivers/media/dvb/ttpci/Kconfig2
-rw-r--r--drivers/media/video/bt8xx/Kconfig2
-rw-r--r--drivers/media/video/cx18/Kconfig2
-rw-r--r--drivers/media/video/cx231xx/Kconfig2
-rw-r--r--drivers/media/video/cx23885/Kconfig2
-rw-r--r--drivers/media/video/cx88/Kconfig2
-rw-r--r--drivers/media/video/em28xx/Kconfig2
-rw-r--r--drivers/media/video/ivtv/Kconfig2
-rw-r--r--drivers/media/video/saa7134/Kconfig2
-rw-r--r--drivers/media/video/tlg2300/Kconfig2
-rw-r--r--drivers/media/video/v4l2-ctrls.c1
-rw-r--r--drivers/net/caif/caif_spi_slave.c4
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c72
-rw-r--r--drivers/net/usb/usbnet.c22
-rw-r--r--drivers/net/wan/farsync.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c5
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/s390/net/claw.c118
-rw-r--r--drivers/s390/net/claw.h4
-rw-r--r--drivers/s390/net/ctcm_fsms.c60
-rw-r--r--drivers/s390/net/ctcm_main.c80
-rw-r--r--drivers/s390/net/ctcm_main.h4
-rw-r--r--drivers/s390/net/ctcm_mpc.c64
-rw-r--r--drivers/s390/net/ctcm_sysfs.c20
-rw-r--r--fs/afs/cell.c56
-rw-r--r--fs/afs/dir.c47
-rw-r--r--fs/afs/inode.c86
-rw-r--r--fs/afs/internal.h11
-rw-r--r--fs/afs/mntpt.c78
-rw-r--r--fs/afs/proc.c2
-rw-r--r--fs/afs/super.c20
-rw-r--r--fs/cifs/README10
-rw-r--r--fs/nfs/Kconfig17
-rw-r--r--fs/nfs/dns_resolve.c24
-rw-r--r--fs/nfs/dns_resolve.h12
-rw-r--r--fs/ocfs2/acl.c33
-rw-r--r--fs/ocfs2/cluster/tcp.c17
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c9
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c22
-rw-r--r--fs/ocfs2/dlm/dlmthread.c114
-rw-r--r--fs/ocfs2/refcounttree.c20
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/netpoll.h2
-rw-r--r--include/net/bluetooth/l2cap.h4
-rw-r--r--include/net/sock.h4
-rw-r--r--mm/memory.c4
-rw-r--r--net/bluetooth/l2cap.c11
-rw-r--r--net/caif/cfpkt_skbuff.c2
-rw-r--r--net/can/bcm.c41
-rw-r--r--net/dns_resolver/dns_key.c92
-rw-r--r--net/dns_resolver/dns_query.c5
-rw-r--r--net/dsa/Kconfig2
-rw-r--r--net/sched/sch_api.c22
-rw-r--r--net/sched/sch_atm.c4
-rw-r--r--net/sched/sch_sfq.c14
-rw-r--r--net/sched/sch_tbf.c4
-rw-r--r--net/sched/sch_teql.c2
-rw-r--r--net/wireless/mlme.c8
-rw-r--r--tools/perf/Makefile31
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-report.c31
-rw-r--r--tools/perf/builtin-timechart.c4
-rw-r--r--tools/perf/builtin-trace.c19
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debug.h9
-rw-r--r--tools/perf/util/hist.c16
-rw-r--r--tools/perf/util/hist.h3
-rw-r--r--tools/perf/util/include/linux/list.h8
-rw-r--r--tools/perf/util/include/linux/types.h12
-rw-r--r--tools/perf/util/probe-event.c11
-rw-r--r--tools/perf/util/probe-finder.c9
-rw-r--r--tools/perf/util/pstack.h2
-rw-r--r--tools/perf/util/sort.c6
-rw-r--r--tools/perf/util/symbol.c40
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/ui/browser.c329
-rw-r--r--tools/perf/util/ui/browser.h46
-rw-r--r--tools/perf/util/ui/browsers/annotate.c240
-rw-r--r--tools/perf/util/ui/browsers/hists.c (renamed from tools/perf/util/newt.c)1454
-rw-r--r--tools/perf/util/ui/browsers/map.c161
-rw-r--r--tools/perf/util/ui/browsers/map.h6
-rw-r--r--tools/perf/util/ui/helpline.c69
-rw-r--r--tools/perf/util/ui/helpline.h11
-rw-r--r--tools/perf/util/ui/libslang.h27
-rw-r--r--tools/perf/util/ui/progress.c60
-rw-r--r--tools/perf/util/ui/progress.h11
-rw-r--r--tools/perf/util/ui/setup.c42
-rw-r--r--tools/perf/util/ui/util.c114
-rw-r--r--tools/perf/util/ui/util.h10
118 files changed, 3454 insertions, 2050 deletions
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 625c3f0e741a..8ca82839288a 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -37,6 +37,13 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
37#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x)) 37#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
38/* __pa_symbol should be used for C visible symbols. 38/* __pa_symbol should be used for C visible symbols.
39 This seems to be the official gcc blessed way to do such arithmetic. */ 39 This seems to be the official gcc blessed way to do such arithmetic. */
40/*
41 * We need __phys_reloc_hide() here because gcc may assume that there is no
42 * overflow during __pa() calculation and can optimize it unexpectedly.
43 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
44 * case properly. Once all supported versions of gcc understand it, we can
45 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
46 */
40#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x))) 47#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
41 48
42#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) 49#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index aa558ac0306e..42d412fd8b02 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -34,6 +34,7 @@
34 */ 34 */
35 35
36#define UV_ITEMS_PER_DESCRIPTOR 8 36#define UV_ITEMS_PER_DESCRIPTOR 8
37/* the 'throttle' to prevent the hardware stay-busy bug */
37#define MAX_BAU_CONCURRENT 3 38#define MAX_BAU_CONCURRENT 3
38#define UV_CPUS_PER_ACT_STATUS 32 39#define UV_CPUS_PER_ACT_STATUS 32
39#define UV_ACT_STATUS_MASK 0x3 40#define UV_ACT_STATUS_MASK 0x3
@@ -45,10 +46,26 @@
45#define UV_DESC_BASE_PNODE_SHIFT 49 46#define UV_DESC_BASE_PNODE_SHIFT 49
46#define UV_PAYLOADQ_PNODE_SHIFT 49 47#define UV_PAYLOADQ_PNODE_SHIFT 49
47#define UV_PTC_BASENAME "sgi_uv/ptc_statistics" 48#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
49#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
50#define UV_BAU_TUNABLES_DIR "sgi_uv"
51#define UV_BAU_TUNABLES_FILE "bau_tunables"
52#define WHITESPACE " \t\n"
48#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask)) 53#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
49#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15 54#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
50#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16 55#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
51#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL 56#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
57/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
58#define BAU_MISC_CONTROL_MULT_MASK 3
59
60#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
61/* [30:28] URGENCY_7 an index into a table of times */
62#define BAU_URGENCY_7_SHIFT 28
63#define BAU_URGENCY_7_MASK 7
64
65#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
66/* [45:40] BAU - BAU transaction timeout select - a multiplier */
67#define BAU_TRANS_SHIFT 40
68#define BAU_TRANS_MASK 0x3f
52 69
53/* 70/*
54 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 71 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
@@ -59,24 +76,21 @@
59#define DESC_STATUS_SOURCE_TIMEOUT 3 76#define DESC_STATUS_SOURCE_TIMEOUT 3
60 77
61/* 78/*
62 * source side threshholds at which message retries print a warning 79 * delay for 'plugged' timeout retries, in microseconds
63 */
64#define SOURCE_TIMEOUT_LIMIT 20
65#define DESTINATION_TIMEOUT_LIMIT 20
66
67/*
68 * misc. delays, in microseconds
69 */ 80 */
70#define THROTTLE_DELAY 10 81#define PLUGGED_DELAY 10
71#define TIMEOUT_DELAY 10
72#define BIOS_TO 1000
73/* BIOS is assumed to set the destination timeout to 1003520 nanoseconds */
74 82
75/* 83/*
76 * threshholds at which to use IPI to free resources 84 * threshholds at which to use IPI to free resources
77 */ 85 */
86/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
78#define PLUGSB4RESET 100 87#define PLUGSB4RESET 100
79#define TIMEOUTSB4RESET 100 88/* after this many consecutive timeouts, use IPI to release resources */
89#define TIMEOUTSB4RESET 1
90/* at this number uses of IPI to release resources, giveup the request */
91#define IPI_RESET_LIMIT 1
92/* after this # consecutive successes, bump up the throttle if it was lowered */
93#define COMPLETE_THRESHOLD 5
80 94
81/* 95/*
82 * number of entries in the destination side payload queue 96 * number of entries in the destination side payload queue
@@ -96,6 +110,13 @@
96#define FLUSH_COMPLETE 4 110#define FLUSH_COMPLETE 4
97 111
98/* 112/*
113 * tuning the action when the numalink network is extremely delayed
114 */
115#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
116#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
117#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
118
119/*
99 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) 120 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
100 * If the 'multilevel' flag in the header portion of the descriptor 121 * If the 'multilevel' flag in the header portion of the descriptor
101 * has been set to 0, then endpoint multi-unicast mode is selected. 122 * has been set to 0, then endpoint multi-unicast mode is selected.
@@ -300,37 +321,16 @@ struct bau_payload_queue_entry {
300 /* bytes 24-31 */ 321 /* bytes 24-31 */
301}; 322};
302 323
303/* 324struct msg_desc {
304 * one per-cpu; to locate the software tables 325 struct bau_payload_queue_entry *msg;
305 */ 326 int msg_slot;
306struct bau_control { 327 int sw_ack_slot;
307 struct bau_desc *descriptor_base;
308 struct bau_payload_queue_entry *va_queue_first; 328 struct bau_payload_queue_entry *va_queue_first;
309 struct bau_payload_queue_entry *va_queue_last; 329 struct bau_payload_queue_entry *va_queue_last;
310 struct bau_payload_queue_entry *bau_msg_head; 330};
311 struct bau_control *uvhub_master; 331
312 struct bau_control *socket_master; 332struct reset_args {
313 unsigned long timeout_interval; 333 int sender;
314 atomic_t active_descriptor_count;
315 int max_concurrent;
316 int max_concurrent_constant;
317 int retry_message_scans;
318 int plugged_tries;
319 int timeout_tries;
320 int ipi_attempts;
321 int conseccompletes;
322 short cpu;
323 short uvhub_cpu;
324 short uvhub;
325 short cpus_in_socket;
326 short cpus_in_uvhub;
327 unsigned short message_number;
328 unsigned short uvhub_quiesce;
329 short socket_acknowledge_count[DEST_Q_SIZE];
330 cycles_t send_message;
331 spinlock_t masks_lock;
332 spinlock_t uvhub_lock;
333 spinlock_t queue_lock;
334}; 334};
335 335
336/* 336/*
@@ -344,18 +344,25 @@ struct ptc_stats {
344 unsigned long s_dtimeout; /* destination side timeouts */ 344 unsigned long s_dtimeout; /* destination side timeouts */
345 unsigned long s_time; /* time spent in sending side */ 345 unsigned long s_time; /* time spent in sending side */
346 unsigned long s_retriesok; /* successful retries */ 346 unsigned long s_retriesok; /* successful retries */
347 unsigned long s_ntargcpu; /* number of cpus targeted */ 347 unsigned long s_ntargcpu; /* total number of cpu's targeted */
348 unsigned long s_ntarguvhub; /* number of uvhubs targeted */ 348 unsigned long s_ntargself; /* times the sending cpu was targeted */
349 unsigned long s_ntarguvhub16; /* number of times >= 16 target hubs */ 349 unsigned long s_ntarglocals; /* targets of cpus on the local blade */
350 unsigned long s_ntarguvhub8; /* number of times >= 8 target hubs */ 350 unsigned long s_ntargremotes; /* targets of cpus on remote blades */
351 unsigned long s_ntarguvhub4; /* number of times >= 4 target hubs */ 351 unsigned long s_ntarglocaluvhub; /* targets of the local hub */
352 unsigned long s_ntarguvhub2; /* number of times >= 2 target hubs */ 352 unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
353 unsigned long s_ntarguvhub1; /* number of times == 1 target hub */ 353 unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
354 unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
355 unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
356 unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
357 unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
358 unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
354 unsigned long s_resets_plug; /* ipi-style resets from plug state */ 359 unsigned long s_resets_plug; /* ipi-style resets from plug state */
355 unsigned long s_resets_timeout; /* ipi-style resets from timeouts */ 360 unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
356 unsigned long s_busy; /* status stayed busy past s/w timer */ 361 unsigned long s_busy; /* status stayed busy past s/w timer */
357 unsigned long s_throttles; /* waits in throttle */ 362 unsigned long s_throttles; /* waits in throttle */
358 unsigned long s_retry_messages; /* retry broadcasts */ 363 unsigned long s_retry_messages; /* retry broadcasts */
364 unsigned long s_bau_reenabled; /* for bau enable/disable */
365 unsigned long s_bau_disabled; /* for bau enable/disable */
359 /* destination statistics */ 366 /* destination statistics */
360 unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */ 367 unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
361 unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */ 368 unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
@@ -370,6 +377,52 @@ struct ptc_stats {
370 unsigned long d_rcanceled; /* number of messages canceled by resets */ 377 unsigned long d_rcanceled; /* number of messages canceled by resets */
371}; 378};
372 379
380/*
381 * one per-cpu; to locate the software tables
382 */
383struct bau_control {
384 struct bau_desc *descriptor_base;
385 struct bau_payload_queue_entry *va_queue_first;
386 struct bau_payload_queue_entry *va_queue_last;
387 struct bau_payload_queue_entry *bau_msg_head;
388 struct bau_control *uvhub_master;
389 struct bau_control *socket_master;
390 struct ptc_stats *statp;
391 unsigned long timeout_interval;
392 unsigned long set_bau_on_time;
393 atomic_t active_descriptor_count;
394 int plugged_tries;
395 int timeout_tries;
396 int ipi_attempts;
397 int conseccompletes;
398 int baudisabled;
399 int set_bau_off;
400 short cpu;
401 short uvhub_cpu;
402 short uvhub;
403 short cpus_in_socket;
404 short cpus_in_uvhub;
405 unsigned short message_number;
406 unsigned short uvhub_quiesce;
407 short socket_acknowledge_count[DEST_Q_SIZE];
408 cycles_t send_message;
409 spinlock_t uvhub_lock;
410 spinlock_t queue_lock;
411 /* tunables */
412 int max_bau_concurrent;
413 int max_bau_concurrent_constant;
414 int plugged_delay;
415 int plugsb4reset;
416 int timeoutsb4reset;
417 int ipi_reset_limit;
418 int complete_threshold;
419 int congested_response_us;
420 int congested_reps;
421 int congested_period;
422 cycles_t period_time;
423 long period_requests;
424};
425
373static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp) 426static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
374{ 427{
375 return constant_test_bit(uvhub, &dstp->bits[0]); 428 return constant_test_bit(uvhub, &dstp->bits[0]);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 980508c79082..e3b534cda49a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void)
1606 * acpi lapic path already maps that address in 1606 * acpi lapic path already maps that address in
1607 * acpi_register_lapic_address() 1607 * acpi_register_lapic_address()
1608 */ 1608 */
1609 if (!acpi_lapic) 1609 if (!acpi_lapic && !smp_found_config)
1610 set_fixmap_nocache(FIX_APIC_BASE, apic_phys); 1610 set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
1611 1611
1612 apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", 1612 apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 107711bf0ee8..febb12cea795 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -656,6 +656,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
656 cpuc = &__get_cpu_var(cpu_hw_events); 656 cpuc = &__get_cpu_var(cpu_hw_events);
657 657
658 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 658 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
659 int overflow;
659 660
660 if (!test_bit(idx, cpuc->active_mask)) 661 if (!test_bit(idx, cpuc->active_mask))
661 continue; 662 continue;
@@ -666,12 +667,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
666 WARN_ON_ONCE(hwc->idx != idx); 667 WARN_ON_ONCE(hwc->idx != idx);
667 668
668 /* it might be unflagged overflow */ 669 /* it might be unflagged overflow */
669 handled = p4_pmu_clear_cccr_ovf(hwc); 670 overflow = p4_pmu_clear_cccr_ovf(hwc);
670 671
671 val = x86_perf_event_update(event); 672 val = x86_perf_event_update(event);
672 if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) 673 if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
673 continue; 674 continue;
674 675
676 handled += overflow;
677
675 /* event overflow for sure */ 678 /* event overflow for sure */
676 data.period = event->hw.last_period; 679 data.period = event->hw.last_period;
677 680
@@ -687,7 +690,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
687 inc_irq_stat(apic_perf_irqs); 690 inc_irq_stat(apic_perf_irqs);
688 } 691 }
689 692
690 return handled; 693 return handled > 0;
691} 694}
692 695
693/* 696/*
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index d86dbf7e54be..d7b6f7fb4fec 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
274 274
275void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } 275void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
276 276
277static void __init smp_register_lapic_address(unsigned long address)
278{
279 mp_lapic_addr = address;
280
281 set_fixmap_nocache(FIX_APIC_BASE, address);
282 if (boot_cpu_physical_apicid == -1U) {
283 boot_cpu_physical_apicid = read_apic_id();
284 apic_version[boot_cpu_physical_apicid] =
285 GET_APIC_VERSION(apic_read(APIC_LVR));
286 }
287}
288
277static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) 289static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
278{ 290{
279 char str[16]; 291 char str[16];
@@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
295 if (early) 307 if (early)
296 return 1; 308 return 1;
297 309
310 /* Initialize the lapic mapping */
311 if (!acpi_lapic)
312 smp_register_lapic_address(mpc->lapic);
313
298 if (mpc->oemptr) 314 if (mpc->oemptr)
299 x86_init.mpparse.smp_read_mpc_oem(mpc); 315 x86_init.mpparse.smp_read_mpc_oem(mpc);
300 316
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 7fea555929e2..59efb5390b37 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/seq_file.h> 9#include <linux/seq_file.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/debugfs.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/slab.h> 13#include <linux/slab.h>
13 14
@@ -22,19 +23,37 @@
22#include <asm/irq_vectors.h> 23#include <asm/irq_vectors.h>
23#include <asm/timer.h> 24#include <asm/timer.h>
24 25
25struct msg_desc { 26/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
26 struct bau_payload_queue_entry *msg; 27static int timeout_base_ns[] = {
27 int msg_slot; 28 20,
28 int sw_ack_slot; 29 160,
29 struct bau_payload_queue_entry *va_queue_first; 30 1280,
30 struct bau_payload_queue_entry *va_queue_last; 31 10240,
32 81920,
33 655360,
34 5242880,
35 167772160
31}; 36};
32 37static int timeout_us;
33#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
34
35static int uv_bau_max_concurrent __read_mostly;
36
37static int nobau; 38static int nobau;
39static int baudisabled;
40static spinlock_t disable_lock;
41static cycles_t congested_cycles;
42
43/* tunables: */
44static int max_bau_concurrent = MAX_BAU_CONCURRENT;
45static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
46static int plugged_delay = PLUGGED_DELAY;
47static int plugsb4reset = PLUGSB4RESET;
48static int timeoutsb4reset = TIMEOUTSB4RESET;
49static int ipi_reset_limit = IPI_RESET_LIMIT;
50static int complete_threshold = COMPLETE_THRESHOLD;
51static int congested_response_us = CONGESTED_RESPONSE_US;
52static int congested_reps = CONGESTED_REPS;
53static int congested_period = CONGESTED_PERIOD;
54static struct dentry *tunables_dir;
55static struct dentry *tunables_file;
56
38static int __init setup_nobau(char *arg) 57static int __init setup_nobau(char *arg)
39{ 58{
40 nobau = 1; 59 nobau = 1;
@@ -52,10 +71,6 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
52static DEFINE_PER_CPU(struct bau_control, bau_control); 71static DEFINE_PER_CPU(struct bau_control, bau_control);
53static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); 72static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
54 73
55struct reset_args {
56 int sender;
57};
58
59/* 74/*
60 * Determine the first node on a uvhub. 'Nodes' are used for kernel 75 * Determine the first node on a uvhub. 'Nodes' are used for kernel
61 * memory allocation. 76 * memory allocation.
@@ -126,7 +141,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
126 struct ptc_stats *stat; 141 struct ptc_stats *stat;
127 142
128 msg = mdp->msg; 143 msg = mdp->msg;
129 stat = &per_cpu(ptcstats, bcp->cpu); 144 stat = bcp->statp;
130 stat->d_retries++; 145 stat->d_retries++;
131 /* 146 /*
132 * cancel any message from msg+1 to the retry itself 147 * cancel any message from msg+1 to the retry itself
@@ -146,15 +161,14 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
146 slot2 = msg2 - mdp->va_queue_first; 161 slot2 = msg2 - mdp->va_queue_first;
147 mmr = uv_read_local_mmr 162 mmr = uv_read_local_mmr
148 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 163 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
149 msg_res = ((msg2->sw_ack_vector << 8) | 164 msg_res = msg2->sw_ack_vector;
150 msg2->sw_ack_vector);
151 /* 165 /*
152 * This is a message retry; clear the resources held 166 * This is a message retry; clear the resources held
153 * by the previous message only if they timed out. 167 * by the previous message only if they timed out.
154 * If it has not timed out we have an unexpected 168 * If it has not timed out we have an unexpected
155 * situation to report. 169 * situation to report.
156 */ 170 */
157 if (mmr & (msg_res << 8)) { 171 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
158 /* 172 /*
159 * is the resource timed out? 173 * is the resource timed out?
160 * make everyone ignore the cancelled message. 174 * make everyone ignore the cancelled message.
@@ -164,9 +178,9 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
164 cancel_count++; 178 cancel_count++;
165 uv_write_local_mmr( 179 uv_write_local_mmr(
166 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, 180 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
167 (msg_res << 8) | msg_res); 181 (msg_res << UV_SW_ACK_NPENDING) |
168 } else 182 msg_res);
169 printk(KERN_INFO "note bau retry: no effect\n"); 183 }
170 } 184 }
171 } 185 }
172 if (!cancel_count) 186 if (!cancel_count)
@@ -190,7 +204,7 @@ static void uv_bau_process_message(struct msg_desc *mdp,
190 * This must be a normal message, or retry of a normal message 204 * This must be a normal message, or retry of a normal message
191 */ 205 */
192 msg = mdp->msg; 206 msg = mdp->msg;
193 stat = &per_cpu(ptcstats, bcp->cpu); 207 stat = bcp->statp;
194 if (msg->address == TLB_FLUSH_ALL) { 208 if (msg->address == TLB_FLUSH_ALL) {
195 local_flush_tlb(); 209 local_flush_tlb();
196 stat->d_alltlb++; 210 stat->d_alltlb++;
@@ -274,7 +288,7 @@ uv_do_reset(void *ptr)
274 288
275 bcp = &per_cpu(bau_control, smp_processor_id()); 289 bcp = &per_cpu(bau_control, smp_processor_id());
276 rap = (struct reset_args *)ptr; 290 rap = (struct reset_args *)ptr;
277 stat = &per_cpu(ptcstats, bcp->cpu); 291 stat = bcp->statp;
278 stat->d_resets++; 292 stat->d_resets++;
279 293
280 /* 294 /*
@@ -302,13 +316,13 @@ uv_do_reset(void *ptr)
302 */ 316 */
303 mmr = uv_read_local_mmr 317 mmr = uv_read_local_mmr
304 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); 318 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
305 msg_res = ((msg->sw_ack_vector << 8) | 319 msg_res = msg->sw_ack_vector;
306 msg->sw_ack_vector);
307 if (mmr & msg_res) { 320 if (mmr & msg_res) {
308 stat->d_rcanceled++; 321 stat->d_rcanceled++;
309 uv_write_local_mmr( 322 uv_write_local_mmr(
310 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, 323 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
311 msg_res); 324 (msg_res << UV_SW_ACK_NPENDING) |
325 msg_res);
312 } 326 }
313 } 327 }
314 } 328 }
@@ -386,17 +400,12 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
386 unsigned long mmr_offset, int right_shift, int this_cpu, 400 unsigned long mmr_offset, int right_shift, int this_cpu,
387 struct bau_control *bcp, struct bau_control *smaster, long try) 401 struct bau_control *bcp, struct bau_control *smaster, long try)
388{ 402{
389 int relaxes = 0;
390 unsigned long descriptor_status; 403 unsigned long descriptor_status;
391 unsigned long mmr;
392 unsigned long mask;
393 cycles_t ttime; 404 cycles_t ttime;
394 cycles_t timeout_time; 405 struct ptc_stats *stat = bcp->statp;
395 struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
396 struct bau_control *hmaster; 406 struct bau_control *hmaster;
397 407
398 hmaster = bcp->uvhub_master; 408 hmaster = bcp->uvhub_master;
399 timeout_time = get_cycles() + bcp->timeout_interval;
400 409
401 /* spin on the status MMR, waiting for it to go idle */ 410 /* spin on the status MMR, waiting for it to go idle */
402 while ((descriptor_status = (((unsigned long) 411 while ((descriptor_status = (((unsigned long)
@@ -423,7 +432,8 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
423 * pending. In that case hardware returns the 432 * pending. In that case hardware returns the
424 * ERROR that looks like a destination timeout. 433 * ERROR that looks like a destination timeout.
425 */ 434 */
426 if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) { 435 if (cycles_2_us(ttime - bcp->send_message) <
436 timeout_us) {
427 bcp->conseccompletes = 0; 437 bcp->conseccompletes = 0;
428 return FLUSH_RETRY_PLUGGED; 438 return FLUSH_RETRY_PLUGGED;
429 } 439 }
@@ -435,26 +445,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
435 * descriptor_status is still BUSY 445 * descriptor_status is still BUSY
436 */ 446 */
437 cpu_relax(); 447 cpu_relax();
438 relaxes++;
439 if (relaxes >= 10000) {
440 relaxes = 0;
441 if (get_cycles() > timeout_time) {
442 quiesce_local_uvhub(hmaster);
443
444 /* single-thread the register change */
445 spin_lock(&hmaster->masks_lock);
446 mmr = uv_read_local_mmr(mmr_offset);
447 mask = 0UL;
448 mask |= (3UL < right_shift);
449 mask = ~mask;
450 mmr &= mask;
451 uv_write_local_mmr(mmr_offset, mmr);
452 spin_unlock(&hmaster->masks_lock);
453 end_uvhub_quiesce(hmaster);
454 stat->s_busy++;
455 return FLUSH_GIVEUP;
456 }
457 }
458 } 448 }
459 } 449 }
460 bcp->conseccompletes++; 450 bcp->conseccompletes++;
@@ -494,56 +484,116 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
494 return 1; 484 return 1;
495} 485}
496 486
487/*
488 * Our retries are blocked by all destination swack resources being
489 * in use, and a timeout is pending. In that case hardware immediately
490 * returns the ERROR that looks like a destination timeout.
491 */
492static void
493destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
494 struct bau_control *hmaster, struct ptc_stats *stat)
495{
496 udelay(bcp->plugged_delay);
497 bcp->plugged_tries++;
498 if (bcp->plugged_tries >= bcp->plugsb4reset) {
499 bcp->plugged_tries = 0;
500 quiesce_local_uvhub(hmaster);
501 spin_lock(&hmaster->queue_lock);
502 uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
503 spin_unlock(&hmaster->queue_lock);
504 end_uvhub_quiesce(hmaster);
505 bcp->ipi_attempts++;
506 stat->s_resets_plug++;
507 }
508}
509
510static void
511destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
512 struct bau_control *hmaster, struct ptc_stats *stat)
513{
514 hmaster->max_bau_concurrent = 1;
515 bcp->timeout_tries++;
516 if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
517 bcp->timeout_tries = 0;
518 quiesce_local_uvhub(hmaster);
519 spin_lock(&hmaster->queue_lock);
520 uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
521 spin_unlock(&hmaster->queue_lock);
522 end_uvhub_quiesce(hmaster);
523 bcp->ipi_attempts++;
524 stat->s_resets_timeout++;
525 }
526}
527
528/*
529 * Completions are taking a very long time due to a congested numalink
530 * network.
531 */
532static void
533disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
534{
535 int tcpu;
536 struct bau_control *tbcp;
537
538 /* let only one cpu do this disabling */
539 spin_lock(&disable_lock);
540 if (!baudisabled && bcp->period_requests &&
541 ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
542 /* it becomes this cpu's job to turn on the use of the
543 BAU again */
544 baudisabled = 1;
545 bcp->set_bau_off = 1;
546 bcp->set_bau_on_time = get_cycles() +
547 sec_2_cycles(bcp->congested_period);
548 stat->s_bau_disabled++;
549 for_each_present_cpu(tcpu) {
550 tbcp = &per_cpu(bau_control, tcpu);
551 tbcp->baudisabled = 1;
552 }
553 }
554 spin_unlock(&disable_lock);
555}
556
497/** 557/**
498 * uv_flush_send_and_wait 558 * uv_flush_send_and_wait
499 * 559 *
500 * Send a broadcast and wait for it to complete. 560 * Send a broadcast and wait for it to complete.
501 * 561 *
502 * The flush_mask contains the cpus the broadcast is to be sent to, plus 562 * The flush_mask contains the cpus the broadcast is to be sent to including
503 * cpus that are on the local uvhub. 563 * cpus that are on the local uvhub.
504 * 564 *
505 * Returns NULL if all flushing represented in the mask was done. The mask 565 * Returns 0 if all flushing represented in the mask was done.
506 * is zeroed. 566 * Returns 1 if it gives up entirely and the original cpu mask is to be
507 * Returns @flush_mask if some remote flushing remains to be done. The 567 * returned to the kernel.
508 * mask will have some bits still set, representing any cpus on the local
509 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
510 */ 568 */
511const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc, 569int uv_flush_send_and_wait(struct bau_desc *bau_desc,
512 struct cpumask *flush_mask, 570 struct cpumask *flush_mask, struct bau_control *bcp)
513 struct bau_control *bcp)
514{ 571{
515 int right_shift; 572 int right_shift;
516 int uvhub;
517 int bit;
518 int completion_status = 0; 573 int completion_status = 0;
519 int seq_number = 0; 574 int seq_number = 0;
520 long try = 0; 575 long try = 0;
521 int cpu = bcp->uvhub_cpu; 576 int cpu = bcp->uvhub_cpu;
522 int this_cpu = bcp->cpu; 577 int this_cpu = bcp->cpu;
523 int this_uvhub = bcp->uvhub;
524 unsigned long mmr_offset; 578 unsigned long mmr_offset;
525 unsigned long index; 579 unsigned long index;
526 cycles_t time1; 580 cycles_t time1;
527 cycles_t time2; 581 cycles_t time2;
528 struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu); 582 cycles_t elapsed;
583 struct ptc_stats *stat = bcp->statp;
529 struct bau_control *smaster = bcp->socket_master; 584 struct bau_control *smaster = bcp->socket_master;
530 struct bau_control *hmaster = bcp->uvhub_master; 585 struct bau_control *hmaster = bcp->uvhub_master;
531 586
532 /*
533 * Spin here while there are hmaster->max_concurrent or more active
534 * descriptors. This is the per-uvhub 'throttle'.
535 */
536 if (!atomic_inc_unless_ge(&hmaster->uvhub_lock, 587 if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
537 &hmaster->active_descriptor_count, 588 &hmaster->active_descriptor_count,
538 hmaster->max_concurrent)) { 589 hmaster->max_bau_concurrent)) {
539 stat->s_throttles++; 590 stat->s_throttles++;
540 do { 591 do {
541 cpu_relax(); 592 cpu_relax();
542 } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock, 593 } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
543 &hmaster->active_descriptor_count, 594 &hmaster->active_descriptor_count,
544 hmaster->max_concurrent)); 595 hmaster->max_bau_concurrent));
545 } 596 }
546
547 while (hmaster->uvhub_quiesce) 597 while (hmaster->uvhub_quiesce)
548 cpu_relax(); 598 cpu_relax();
549 599
@@ -557,23 +607,10 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
557 } 607 }
558 time1 = get_cycles(); 608 time1 = get_cycles();
559 do { 609 do {
560 /*
561 * Every message from any given cpu gets a unique message
562 * sequence number. But retries use that same number.
563 * Our message may have timed out at the destination because
564 * all sw-ack resources are in use and there is a timeout
565 * pending there. In that case, our last send never got
566 * placed into the queue and we need to persist until it
567 * does.
568 *
569 * Make any retry a type MSG_RETRY so that the destination will
570 * free any resource held by a previous message from this cpu.
571 */
572 if (try == 0) { 610 if (try == 0) {
573 /* use message type set by the caller the first time */ 611 bau_desc->header.msg_type = MSG_REGULAR;
574 seq_number = bcp->message_number++; 612 seq_number = bcp->message_number++;
575 } else { 613 } else {
576 /* use RETRY type on all the rest; same sequence */
577 bau_desc->header.msg_type = MSG_RETRY; 614 bau_desc->header.msg_type = MSG_RETRY;
578 stat->s_retry_messages++; 615 stat->s_retry_messages++;
579 } 616 }
@@ -581,50 +618,17 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
581 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | 618 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
582 bcp->uvhub_cpu; 619 bcp->uvhub_cpu;
583 bcp->send_message = get_cycles(); 620 bcp->send_message = get_cycles();
584
585 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); 621 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
586
587 try++; 622 try++;
588 completion_status = uv_wait_completion(bau_desc, mmr_offset, 623 completion_status = uv_wait_completion(bau_desc, mmr_offset,
589 right_shift, this_cpu, bcp, smaster, try); 624 right_shift, this_cpu, bcp, smaster, try);
590 625
591 if (completion_status == FLUSH_RETRY_PLUGGED) { 626 if (completion_status == FLUSH_RETRY_PLUGGED) {
592 /* 627 destination_plugged(bau_desc, bcp, hmaster, stat);
593 * Our retries may be blocked by all destination swack
594 * resources being consumed, and a timeout pending. In
595 * that case hardware immediately returns the ERROR
596 * that looks like a destination timeout.
597 */
598 udelay(TIMEOUT_DELAY);
599 bcp->plugged_tries++;
600 if (bcp->plugged_tries >= PLUGSB4RESET) {
601 bcp->plugged_tries = 0;
602 quiesce_local_uvhub(hmaster);
603 spin_lock(&hmaster->queue_lock);
604 uv_reset_with_ipi(&bau_desc->distribution,
605 this_cpu);
606 spin_unlock(&hmaster->queue_lock);
607 end_uvhub_quiesce(hmaster);
608 bcp->ipi_attempts++;
609 stat->s_resets_plug++;
610 }
611 } else if (completion_status == FLUSH_RETRY_TIMEOUT) { 628 } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
612 hmaster->max_concurrent = 1; 629 destination_timeout(bau_desc, bcp, hmaster, stat);
613 bcp->timeout_tries++;
614 udelay(TIMEOUT_DELAY);
615 if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
616 bcp->timeout_tries = 0;
617 quiesce_local_uvhub(hmaster);
618 spin_lock(&hmaster->queue_lock);
619 uv_reset_with_ipi(&bau_desc->distribution,
620 this_cpu);
621 spin_unlock(&hmaster->queue_lock);
622 end_uvhub_quiesce(hmaster);
623 bcp->ipi_attempts++;
624 stat->s_resets_timeout++;
625 }
626 } 630 }
627 if (bcp->ipi_attempts >= 3) { 631 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
628 bcp->ipi_attempts = 0; 632 bcp->ipi_attempts = 0;
629 completion_status = FLUSH_GIVEUP; 633 completion_status = FLUSH_GIVEUP;
630 break; 634 break;
@@ -633,49 +637,36 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
633 } while ((completion_status == FLUSH_RETRY_PLUGGED) || 637 } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
634 (completion_status == FLUSH_RETRY_TIMEOUT)); 638 (completion_status == FLUSH_RETRY_TIMEOUT));
635 time2 = get_cycles(); 639 time2 = get_cycles();
636 640 bcp->plugged_tries = 0;
637 if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5) 641 bcp->timeout_tries = 0;
638 && (hmaster->max_concurrent < hmaster->max_concurrent_constant)) 642 if ((completion_status == FLUSH_COMPLETE) &&
639 hmaster->max_concurrent++; 643 (bcp->conseccompletes > bcp->complete_threshold) &&
640 644 (hmaster->max_bau_concurrent <
641 /* 645 hmaster->max_bau_concurrent_constant))
642 * hold any cpu not timing out here; no other cpu currently held by 646 hmaster->max_bau_concurrent++;
643 * the 'throttle' should enter the activation code
644 */
645 while (hmaster->uvhub_quiesce) 647 while (hmaster->uvhub_quiesce)
646 cpu_relax(); 648 cpu_relax();
647 atomic_dec(&hmaster->active_descriptor_count); 649 atomic_dec(&hmaster->active_descriptor_count);
648 650 if (time2 > time1) {
649 /* guard against cycles wrap */ 651 elapsed = time2 - time1;
650 if (time2 > time1) 652 stat->s_time += elapsed;
651 stat->s_time += (time2 - time1); 653 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
652 else 654 bcp->period_requests++;
653 stat->s_requestor--; /* don't count this one */ 655 bcp->period_time += elapsed;
656 if ((elapsed > congested_cycles) &&
657 (bcp->period_requests > bcp->congested_reps)) {
658 disable_for_congestion(bcp, stat);
659 }
660 }
661 } else
662 stat->s_requestor--;
654 if (completion_status == FLUSH_COMPLETE && try > 1) 663 if (completion_status == FLUSH_COMPLETE && try > 1)
655 stat->s_retriesok++; 664 stat->s_retriesok++;
656 else if (completion_status == FLUSH_GIVEUP) { 665 else if (completion_status == FLUSH_GIVEUP) {
657 /*
658 * Cause the caller to do an IPI-style TLB shootdown on
659 * the target cpu's, all of which are still in the mask.
660 */
661 stat->s_giveup++; 666 stat->s_giveup++;
662 return flush_mask; 667 return 1;
663 }
664
665 /*
666 * Success, so clear the remote cpu's from the mask so we don't
667 * use the IPI method of shootdown on them.
668 */
669 for_each_cpu(bit, flush_mask) {
670 uvhub = uv_cpu_to_blade_id(bit);
671 if (uvhub == this_uvhub)
672 continue;
673 cpumask_clear_cpu(bit, flush_mask);
674 } 668 }
675 if (!cpumask_empty(flush_mask)) 669 return 0;
676 return flush_mask;
677
678 return NULL;
679} 670}
680 671
681/** 672/**
@@ -707,70 +698,89 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
707 struct mm_struct *mm, 698 struct mm_struct *mm,
708 unsigned long va, unsigned int cpu) 699 unsigned long va, unsigned int cpu)
709{ 700{
710 int remotes;
711 int tcpu; 701 int tcpu;
712 int uvhub; 702 int uvhub;
713 int locals = 0; 703 int locals = 0;
704 int remotes = 0;
705 int hubs = 0;
714 struct bau_desc *bau_desc; 706 struct bau_desc *bau_desc;
715 struct cpumask *flush_mask; 707 struct cpumask *flush_mask;
716 struct ptc_stats *stat; 708 struct ptc_stats *stat;
717 struct bau_control *bcp; 709 struct bau_control *bcp;
710 struct bau_control *tbcp;
718 711
712 /* kernel was booted 'nobau' */
719 if (nobau) 713 if (nobau)
720 return cpumask; 714 return cpumask;
721 715
722 bcp = &per_cpu(bau_control, cpu); 716 bcp = &per_cpu(bau_control, cpu);
717 stat = bcp->statp;
718
719 /* bau was disabled due to slow response */
720 if (bcp->baudisabled) {
721 /* the cpu that disabled it must re-enable it */
722 if (bcp->set_bau_off) {
723 if (get_cycles() >= bcp->set_bau_on_time) {
724 stat->s_bau_reenabled++;
725 baudisabled = 0;
726 for_each_present_cpu(tcpu) {
727 tbcp = &per_cpu(bau_control, tcpu);
728 tbcp->baudisabled = 0;
729 tbcp->period_requests = 0;
730 tbcp->period_time = 0;
731 }
732 }
733 }
734 return cpumask;
735 }
736
723 /* 737 /*
724 * Each sending cpu has a per-cpu mask which it fills from the caller's 738 * Each sending cpu has a per-cpu mask which it fills from the caller's
725 * cpu mask. Only remote cpus are converted to uvhubs and copied. 739 * cpu mask. All cpus are converted to uvhubs and copied to the
740 * activation descriptor.
726 */ 741 */
727 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); 742 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
728 /* 743 /* don't actually do a shootdown of the local cpu */
729 * copy cpumask to flush_mask, removing current cpu
730 * (current cpu should already have been flushed by the caller and
731 * should never be returned if we return flush_mask)
732 */
733 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 744 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
734 if (cpu_isset(cpu, *cpumask)) 745 if (cpu_isset(cpu, *cpumask))
735 locals++; /* current cpu was targeted */ 746 stat->s_ntargself++;
736 747
737 bau_desc = bcp->descriptor_base; 748 bau_desc = bcp->descriptor_base;
738 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 749 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
739
740 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 750 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
741 remotes = 0; 751
752 /* cpu statistics */
742 for_each_cpu(tcpu, flush_mask) { 753 for_each_cpu(tcpu, flush_mask) {
743 uvhub = uv_cpu_to_blade_id(tcpu); 754 uvhub = uv_cpu_to_blade_id(tcpu);
744 if (uvhub == bcp->uvhub) {
745 locals++;
746 continue;
747 }
748 bau_uvhub_set(uvhub, &bau_desc->distribution); 755 bau_uvhub_set(uvhub, &bau_desc->distribution);
749 remotes++; 756 if (uvhub == bcp->uvhub)
750 } 757 locals++;
751 if (remotes == 0) {
752 /*
753 * No off_hub flushing; return status for local hub.
754 * Return the caller's mask if all were local (the current
755 * cpu may be in that mask).
756 */
757 if (locals)
758 return cpumask;
759 else 758 else
760 return NULL; 759 remotes++;
761 } 760 }
762 stat = &per_cpu(ptcstats, cpu); 761 if ((locals + remotes) == 0)
762 return NULL;
763 stat->s_requestor++; 763 stat->s_requestor++;
764 stat->s_ntargcpu += remotes; 764 stat->s_ntargcpu += remotes + locals;
765 stat->s_ntargremotes += remotes;
766 stat->s_ntarglocals += locals;
765 remotes = bau_uvhub_weight(&bau_desc->distribution); 767 remotes = bau_uvhub_weight(&bau_desc->distribution);
766 stat->s_ntarguvhub += remotes; 768
767 if (remotes >= 16) 769 /* uvhub statistics */
770 hubs = bau_uvhub_weight(&bau_desc->distribution);
771 if (locals) {
772 stat->s_ntarglocaluvhub++;
773 stat->s_ntargremoteuvhub += (hubs - 1);
774 } else
775 stat->s_ntargremoteuvhub += hubs;
776 stat->s_ntarguvhub += hubs;
777 if (hubs >= 16)
768 stat->s_ntarguvhub16++; 778 stat->s_ntarguvhub16++;
769 else if (remotes >= 8) 779 else if (hubs >= 8)
770 stat->s_ntarguvhub8++; 780 stat->s_ntarguvhub8++;
771 else if (remotes >= 4) 781 else if (hubs >= 4)
772 stat->s_ntarguvhub4++; 782 stat->s_ntarguvhub4++;
773 else if (remotes >= 2) 783 else if (hubs >= 2)
774 stat->s_ntarguvhub2++; 784 stat->s_ntarguvhub2++;
775 else 785 else
776 stat->s_ntarguvhub1++; 786 stat->s_ntarguvhub1++;
@@ -779,10 +789,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
779 bau_desc->payload.sending_cpu = cpu; 789 bau_desc->payload.sending_cpu = cpu;
780 790
781 /* 791 /*
782 * uv_flush_send_and_wait returns null if all cpu's were messaged, or 792 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
783 * the adjusted flush_mask if any cpu's were not messaged. 793 * or 1 if it gave up and the original cpumask should be returned.
784 */ 794 */
785 return uv_flush_send_and_wait(bau_desc, flush_mask, bcp); 795 if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
796 return NULL;
797 else
798 return cpumask;
786} 799}
787 800
788/* 801/*
@@ -810,7 +823,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
810 823
811 time_start = get_cycles(); 824 time_start = get_cycles();
812 bcp = &per_cpu(bau_control, smp_processor_id()); 825 bcp = &per_cpu(bau_control, smp_processor_id());
813 stat = &per_cpu(ptcstats, smp_processor_id()); 826 stat = bcp->statp;
814 msgdesc.va_queue_first = bcp->va_queue_first; 827 msgdesc.va_queue_first = bcp->va_queue_first;
815 msgdesc.va_queue_last = bcp->va_queue_last; 828 msgdesc.va_queue_last = bcp->va_queue_last;
816 msg = bcp->bau_msg_head; 829 msg = bcp->bau_msg_head;
@@ -908,12 +921,12 @@ static void uv_ptc_seq_stop(struct seq_file *file, void *data)
908} 921}
909 922
910static inline unsigned long long 923static inline unsigned long long
911millisec_2_cycles(unsigned long millisec) 924microsec_2_cycles(unsigned long microsec)
912{ 925{
913 unsigned long ns; 926 unsigned long ns;
914 unsigned long long cyc; 927 unsigned long long cyc;
915 928
916 ns = millisec * 1000; 929 ns = microsec * 1000;
917 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id())); 930 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
918 return cyc; 931 return cyc;
919} 932}
@@ -931,15 +944,19 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
931 944
932 if (!cpu) { 945 if (!cpu) {
933 seq_printf(file, 946 seq_printf(file,
934 "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 "); 947 "# cpu sent stime self locals remotes ncpus localhub ");
935 seq_printf(file, 948 seq_printf(file,
936 "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto "); 949 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
950 seq_printf(file,
951 "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
937 seq_printf(file, 952 seq_printf(file,
938 "retries rok resetp resett giveup sto bz throt "); 953 "retries rok resetp resett giveup sto bz throt ");
939 seq_printf(file, 954 seq_printf(file,
940 "sw_ack recv rtime all "); 955 "sw_ack recv rtime all ");
941 seq_printf(file, 956 seq_printf(file,
942 "one mult none retry canc nocan reset rcan\n"); 957 "one mult none retry canc nocan reset rcan ");
958 seq_printf(file,
959 "disable enable\n");
943 } 960 }
944 if (cpu < num_possible_cpus() && cpu_online(cpu)) { 961 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
945 stat = &per_cpu(ptcstats, cpu); 962 stat = &per_cpu(ptcstats, cpu);
@@ -947,18 +964,23 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
947 seq_printf(file, 964 seq_printf(file,
948 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", 965 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
949 cpu, stat->s_requestor, cycles_2_us(stat->s_time), 966 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
950 stat->s_ntarguvhub, stat->s_ntarguvhub16, 967 stat->s_ntargself, stat->s_ntarglocals,
968 stat->s_ntargremotes, stat->s_ntargcpu,
969 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
970 stat->s_ntarguvhub, stat->s_ntarguvhub16);
971 seq_printf(file, "%ld %ld %ld %ld %ld ",
951 stat->s_ntarguvhub8, stat->s_ntarguvhub4, 972 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
952 stat->s_ntarguvhub2, stat->s_ntarguvhub1, 973 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
953 stat->s_ntargcpu, stat->s_dtimeout); 974 stat->s_dtimeout);
954 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", 975 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
955 stat->s_retry_messages, stat->s_retriesok, 976 stat->s_retry_messages, stat->s_retriesok,
956 stat->s_resets_plug, stat->s_resets_timeout, 977 stat->s_resets_plug, stat->s_resets_timeout,
957 stat->s_giveup, stat->s_stimeout, 978 stat->s_giveup, stat->s_stimeout,
958 stat->s_busy, stat->s_throttles); 979 stat->s_busy, stat->s_throttles);
980
959 /* destination side statistics */ 981 /* destination side statistics */
960 seq_printf(file, 982 seq_printf(file,
961 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", 983 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
962 uv_read_global_mmr64(uv_cpu_to_pnode(cpu), 984 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
963 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 985 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
964 stat->d_requestee, cycles_2_us(stat->d_time), 986 stat->d_requestee, cycles_2_us(stat->d_time),
@@ -966,15 +988,36 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
966 stat->d_nomsg, stat->d_retries, stat->d_canceled, 988 stat->d_nomsg, stat->d_retries, stat->d_canceled,
967 stat->d_nocanceled, stat->d_resets, 989 stat->d_nocanceled, stat->d_resets,
968 stat->d_rcanceled); 990 stat->d_rcanceled);
991 seq_printf(file, "%ld %ld\n",
992 stat->s_bau_disabled, stat->s_bau_reenabled);
969 } 993 }
970 994
971 return 0; 995 return 0;
972} 996}
973 997
974/* 998/*
999 * Display the tunables thru debugfs
1000 */
1001static ssize_t tunables_read(struct file *file, char __user *userbuf,
1002 size_t count, loff_t *ppos)
1003{
1004 char buf[300];
1005 int ret;
1006
1007 ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
1008 "max_bau_concurrent plugged_delay plugsb4reset",
1009 "timeoutsb4reset ipi_reset_limit complete_threshold",
1010 "congested_response_us congested_reps congested_period",
1011 max_bau_concurrent, plugged_delay, plugsb4reset,
1012 timeoutsb4reset, ipi_reset_limit, complete_threshold,
1013 congested_response_us, congested_reps, congested_period);
1014
1015 return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
1016}
1017
1018/*
975 * -1: resetf the statistics 1019 * -1: resetf the statistics
976 * 0: display meaning of the statistics 1020 * 0: display meaning of the statistics
977 * >0: maximum concurrent active descriptors per uvhub (throttle)
978 */ 1021 */
979static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, 1022static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
980 size_t count, loff_t *data) 1023 size_t count, loff_t *data)
@@ -983,7 +1026,6 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
983 long input_arg; 1026 long input_arg;
984 char optstr[64]; 1027 char optstr[64];
985 struct ptc_stats *stat; 1028 struct ptc_stats *stat;
986 struct bau_control *bcp;
987 1029
988 if (count == 0 || count > sizeof(optstr)) 1030 if (count == 0 || count > sizeof(optstr))
989 return -EINVAL; 1031 return -EINVAL;
@@ -1059,29 +1101,158 @@ static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
1059 "reset: number of ipi-style reset requests processed\n"); 1101 "reset: number of ipi-style reset requests processed\n");
1060 printk(KERN_DEBUG 1102 printk(KERN_DEBUG
1061 "rcan: number messages canceled by reset requests\n"); 1103 "rcan: number messages canceled by reset requests\n");
1104 printk(KERN_DEBUG
1105 "disable: number times use of the BAU was disabled\n");
1106 printk(KERN_DEBUG
1107 "enable: number times use of the BAU was re-enabled\n");
1062 } else if (input_arg == -1) { 1108 } else if (input_arg == -1) {
1063 for_each_present_cpu(cpu) { 1109 for_each_present_cpu(cpu) {
1064 stat = &per_cpu(ptcstats, cpu); 1110 stat = &per_cpu(ptcstats, cpu);
1065 memset(stat, 0, sizeof(struct ptc_stats)); 1111 memset(stat, 0, sizeof(struct ptc_stats));
1066 } 1112 }
1067 } else { 1113 }
1068 uv_bau_max_concurrent = input_arg; 1114
1069 bcp = &per_cpu(bau_control, smp_processor_id()); 1115 return count;
1070 if (uv_bau_max_concurrent < 1 || 1116}
1071 uv_bau_max_concurrent > bcp->cpus_in_uvhub) { 1117
1072 printk(KERN_DEBUG 1118static int local_atoi(const char *name)
1073 "Error: BAU max concurrent %d; %d is invalid\n", 1119{
1074 bcp->max_concurrent, uv_bau_max_concurrent); 1120 int val = 0;
1075 return -EINVAL; 1121
1076 } 1122 for (;; name++) {
1077 printk(KERN_DEBUG "Set BAU max concurrent:%d\n", 1123 switch (*name) {
1078 uv_bau_max_concurrent); 1124 case '0' ... '9':
1079 for_each_present_cpu(cpu) { 1125 val = 10*val+(*name-'0');
1080 bcp = &per_cpu(bau_control, cpu); 1126 break;
1081 bcp->max_concurrent = uv_bau_max_concurrent; 1127 default:
1128 return val;
1082 } 1129 }
1083 } 1130 }
1131}
1132
1133/*
1134 * set the tunables
1135 * 0 values reset them to defaults
1136 */
1137static ssize_t tunables_write(struct file *file, const char __user *user,
1138 size_t count, loff_t *data)
1139{
1140 int cpu;
1141 int cnt = 0;
1142 int val;
1143 char *p;
1144 char *q;
1145 char instr[64];
1146 struct bau_control *bcp;
1147
1148 if (count == 0 || count > sizeof(instr)-1)
1149 return -EINVAL;
1150 if (copy_from_user(instr, user, count))
1151 return -EFAULT;
1152
1153 instr[count] = '\0';
1154 /* count the fields */
1155 p = instr + strspn(instr, WHITESPACE);
1156 q = p;
1157 for (; *p; p = q + strspn(q, WHITESPACE)) {
1158 q = p + strcspn(p, WHITESPACE);
1159 cnt++;
1160 if (q == p)
1161 break;
1162 }
1163 if (cnt != 9) {
1164 printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
1165 return -EINVAL;
1166 }
1084 1167
1168 p = instr + strspn(instr, WHITESPACE);
1169 q = p;
1170 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1171 q = p + strcspn(p, WHITESPACE);
1172 val = local_atoi(p);
1173 switch (cnt) {
1174 case 0:
1175 if (val == 0) {
1176 max_bau_concurrent = MAX_BAU_CONCURRENT;
1177 max_bau_concurrent_constant =
1178 MAX_BAU_CONCURRENT;
1179 continue;
1180 }
1181 bcp = &per_cpu(bau_control, smp_processor_id());
1182 if (val < 1 || val > bcp->cpus_in_uvhub) {
1183 printk(KERN_DEBUG
1184 "Error: BAU max concurrent %d is invalid\n",
1185 val);
1186 return -EINVAL;
1187 }
1188 max_bau_concurrent = val;
1189 max_bau_concurrent_constant = val;
1190 continue;
1191 case 1:
1192 if (val == 0)
1193 plugged_delay = PLUGGED_DELAY;
1194 else
1195 plugged_delay = val;
1196 continue;
1197 case 2:
1198 if (val == 0)
1199 plugsb4reset = PLUGSB4RESET;
1200 else
1201 plugsb4reset = val;
1202 continue;
1203 case 3:
1204 if (val == 0)
1205 timeoutsb4reset = TIMEOUTSB4RESET;
1206 else
1207 timeoutsb4reset = val;
1208 continue;
1209 case 4:
1210 if (val == 0)
1211 ipi_reset_limit = IPI_RESET_LIMIT;
1212 else
1213 ipi_reset_limit = val;
1214 continue;
1215 case 5:
1216 if (val == 0)
1217 complete_threshold = COMPLETE_THRESHOLD;
1218 else
1219 complete_threshold = val;
1220 continue;
1221 case 6:
1222 if (val == 0)
1223 congested_response_us = CONGESTED_RESPONSE_US;
1224 else
1225 congested_response_us = val;
1226 continue;
1227 case 7:
1228 if (val == 0)
1229 congested_reps = CONGESTED_REPS;
1230 else
1231 congested_reps = val;
1232 continue;
1233 case 8:
1234 if (val == 0)
1235 congested_period = CONGESTED_PERIOD;
1236 else
1237 congested_period = val;
1238 continue;
1239 }
1240 if (q == p)
1241 break;
1242 }
1243 for_each_present_cpu(cpu) {
1244 bcp = &per_cpu(bau_control, cpu);
1245 bcp->max_bau_concurrent = max_bau_concurrent;
1246 bcp->max_bau_concurrent_constant = max_bau_concurrent;
1247 bcp->plugged_delay = plugged_delay;
1248 bcp->plugsb4reset = plugsb4reset;
1249 bcp->timeoutsb4reset = timeoutsb4reset;
1250 bcp->ipi_reset_limit = ipi_reset_limit;
1251 bcp->complete_threshold = complete_threshold;
1252 bcp->congested_response_us = congested_response_us;
1253 bcp->congested_reps = congested_reps;
1254 bcp->congested_period = congested_period;
1255 }
1085 return count; 1256 return count;
1086} 1257}
1087 1258
@@ -1097,6 +1268,11 @@ static int uv_ptc_proc_open(struct inode *inode, struct file *file)
1097 return seq_open(file, &uv_ptc_seq_ops); 1268 return seq_open(file, &uv_ptc_seq_ops);
1098} 1269}
1099 1270
1271static int tunables_open(struct inode *inode, struct file *file)
1272{
1273 return 0;
1274}
1275
1100static const struct file_operations proc_uv_ptc_operations = { 1276static const struct file_operations proc_uv_ptc_operations = {
1101 .open = uv_ptc_proc_open, 1277 .open = uv_ptc_proc_open,
1102 .read = seq_read, 1278 .read = seq_read,
@@ -1105,6 +1281,12 @@ static const struct file_operations proc_uv_ptc_operations = {
1105 .release = seq_release, 1281 .release = seq_release,
1106}; 1282};
1107 1283
1284static const struct file_operations tunables_fops = {
1285 .open = tunables_open,
1286 .read = tunables_read,
1287 .write = tunables_write,
1288};
1289
1108static int __init uv_ptc_init(void) 1290static int __init uv_ptc_init(void)
1109{ 1291{
1110 struct proc_dir_entry *proc_uv_ptc; 1292 struct proc_dir_entry *proc_uv_ptc;
@@ -1119,6 +1301,20 @@ static int __init uv_ptc_init(void)
1119 UV_PTC_BASENAME); 1301 UV_PTC_BASENAME);
1120 return -EINVAL; 1302 return -EINVAL;
1121 } 1303 }
1304
1305 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1306 if (!tunables_dir) {
1307 printk(KERN_ERR "unable to create debugfs directory %s\n",
1308 UV_BAU_TUNABLES_DIR);
1309 return -EINVAL;
1310 }
1311 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1312 tunables_dir, NULL, &tunables_fops);
1313 if (!tunables_file) {
1314 printk(KERN_ERR "unable to create debugfs file %s\n",
1315 UV_BAU_TUNABLES_FILE);
1316 return -EINVAL;
1317 }
1122 return 0; 1318 return 0;
1123} 1319}
1124 1320
@@ -1259,15 +1455,44 @@ static void __init uv_init_uvhub(int uvhub, int vector)
1259} 1455}
1260 1456
1261/* 1457/*
1458 * We will set BAU_MISC_CONTROL with a timeout period.
1459 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
1460 * So the destination timeout period has be be calculated from them.
1461 */
1462static int
1463calculate_destination_timeout(void)
1464{
1465 unsigned long mmr_image;
1466 int mult1;
1467 int mult2;
1468 int index;
1469 int base;
1470 int ret;
1471 unsigned long ts_ns;
1472
1473 mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
1474 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1475 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1476 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1477 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1478 base = timeout_base_ns[index];
1479 ts_ns = base * mult1 * mult2;
1480 ret = ts_ns / 1000;
1481 return ret;
1482}
1483
1484/*
1262 * initialize the bau_control structure for each cpu 1485 * initialize the bau_control structure for each cpu
1263 */ 1486 */
1264static void uv_init_per_cpu(int nuvhubs) 1487static void uv_init_per_cpu(int nuvhubs)
1265{ 1488{
1266 int i, j, k; 1489 int i;
1267 int cpu; 1490 int cpu;
1268 int pnode; 1491 int pnode;
1269 int uvhub; 1492 int uvhub;
1270 short socket = 0; 1493 short socket = 0;
1494 unsigned short socket_mask;
1495 unsigned int uvhub_mask;
1271 struct bau_control *bcp; 1496 struct bau_control *bcp;
1272 struct uvhub_desc *bdp; 1497 struct uvhub_desc *bdp;
1273 struct socket_desc *sdp; 1498 struct socket_desc *sdp;
@@ -1278,7 +1503,7 @@ static void uv_init_per_cpu(int nuvhubs)
1278 short cpu_number[16]; 1503 short cpu_number[16];
1279 }; 1504 };
1280 struct uvhub_desc { 1505 struct uvhub_desc {
1281 short num_sockets; 1506 unsigned short socket_mask;
1282 short num_cpus; 1507 short num_cpus;
1283 short uvhub; 1508 short uvhub;
1284 short pnode; 1509 short pnode;
@@ -1286,57 +1511,83 @@ static void uv_init_per_cpu(int nuvhubs)
1286 }; 1511 };
1287 struct uvhub_desc *uvhub_descs; 1512 struct uvhub_desc *uvhub_descs;
1288 1513
1514 timeout_us = calculate_destination_timeout();
1515
1289 uvhub_descs = (struct uvhub_desc *) 1516 uvhub_descs = (struct uvhub_desc *)
1290 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); 1517 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1291 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); 1518 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1292 for_each_present_cpu(cpu) { 1519 for_each_present_cpu(cpu) {
1293 bcp = &per_cpu(bau_control, cpu); 1520 bcp = &per_cpu(bau_control, cpu);
1294 memset(bcp, 0, sizeof(struct bau_control)); 1521 memset(bcp, 0, sizeof(struct bau_control));
1295 spin_lock_init(&bcp->masks_lock);
1296 bcp->max_concurrent = uv_bau_max_concurrent;
1297 pnode = uv_cpu_hub_info(cpu)->pnode; 1522 pnode = uv_cpu_hub_info(cpu)->pnode;
1298 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; 1523 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1524 uvhub_mask |= (1 << uvhub);
1299 bdp = &uvhub_descs[uvhub]; 1525 bdp = &uvhub_descs[uvhub];
1300 bdp->num_cpus++; 1526 bdp->num_cpus++;
1301 bdp->uvhub = uvhub; 1527 bdp->uvhub = uvhub;
1302 bdp->pnode = pnode; 1528 bdp->pnode = pnode;
1303 /* time interval to catch a hardware stay-busy bug */ 1529 /* kludge: 'assuming' one node per socket, and assuming that
1304 bcp->timeout_interval = millisec_2_cycles(3); 1530 disabling a socket just leaves a gap in node numbers */
1305 /* kludge: assume uv_hub.h is constant */ 1531 socket = (cpu_to_node(cpu) & 1);;
1306 socket = (cpu_physical_id(cpu)>>5)&1; 1532 bdp->socket_mask |= (1 << socket);
1307 if (socket >= bdp->num_sockets)
1308 bdp->num_sockets = socket+1;
1309 sdp = &bdp->socket[socket]; 1533 sdp = &bdp->socket[socket];
1310 sdp->cpu_number[sdp->num_cpus] = cpu; 1534 sdp->cpu_number[sdp->num_cpus] = cpu;
1311 sdp->num_cpus++; 1535 sdp->num_cpus++;
1312 } 1536 }
1313 socket = 0; 1537 uvhub = 0;
1314 for_each_possible_blade(uvhub) { 1538 while (uvhub_mask) {
1539 if (!(uvhub_mask & 1))
1540 goto nexthub;
1315 bdp = &uvhub_descs[uvhub]; 1541 bdp = &uvhub_descs[uvhub];
1316 for (i = 0; i < bdp->num_sockets; i++) { 1542 socket_mask = bdp->socket_mask;
1317 sdp = &bdp->socket[i]; 1543 socket = 0;
1318 for (j = 0; j < sdp->num_cpus; j++) { 1544 while (socket_mask) {
1319 cpu = sdp->cpu_number[j]; 1545 if (!(socket_mask & 1))
1546 goto nextsocket;
1547 sdp = &bdp->socket[socket];
1548 for (i = 0; i < sdp->num_cpus; i++) {
1549 cpu = sdp->cpu_number[i];
1320 bcp = &per_cpu(bau_control, cpu); 1550 bcp = &per_cpu(bau_control, cpu);
1321 bcp->cpu = cpu; 1551 bcp->cpu = cpu;
1322 if (j == 0) { 1552 if (i == 0) {
1323 smaster = bcp; 1553 smaster = bcp;
1324 if (i == 0) 1554 if (socket == 0)
1325 hmaster = bcp; 1555 hmaster = bcp;
1326 } 1556 }
1327 bcp->cpus_in_uvhub = bdp->num_cpus; 1557 bcp->cpus_in_uvhub = bdp->num_cpus;
1328 bcp->cpus_in_socket = sdp->num_cpus; 1558 bcp->cpus_in_socket = sdp->num_cpus;
1329 bcp->socket_master = smaster; 1559 bcp->socket_master = smaster;
1560 bcp->uvhub = bdp->uvhub;
1330 bcp->uvhub_master = hmaster; 1561 bcp->uvhub_master = hmaster;
1331 for (k = 0; k < DEST_Q_SIZE; k++) 1562 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
1332 bcp->socket_acknowledge_count[k] = 0; 1563 blade_processor_id;
1333 bcp->uvhub_cpu =
1334 uv_cpu_hub_info(cpu)->blade_processor_id;
1335 } 1564 }
1565nextsocket:
1336 socket++; 1566 socket++;
1567 socket_mask = (socket_mask >> 1);
1337 } 1568 }
1569nexthub:
1570 uvhub++;
1571 uvhub_mask = (uvhub_mask >> 1);
1338 } 1572 }
1339 kfree(uvhub_descs); 1573 kfree(uvhub_descs);
1574 for_each_present_cpu(cpu) {
1575 bcp = &per_cpu(bau_control, cpu);
1576 bcp->baudisabled = 0;
1577 bcp->statp = &per_cpu(ptcstats, cpu);
1578 /* time interval to catch a hardware stay-busy bug */
1579 bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
1580 bcp->max_bau_concurrent = max_bau_concurrent;
1581 bcp->max_bau_concurrent_constant = max_bau_concurrent;
1582 bcp->plugged_delay = plugged_delay;
1583 bcp->plugsb4reset = plugsb4reset;
1584 bcp->timeoutsb4reset = timeoutsb4reset;
1585 bcp->ipi_reset_limit = ipi_reset_limit;
1586 bcp->complete_threshold = complete_threshold;
1587 bcp->congested_response_us = congested_response_us;
1588 bcp->congested_reps = congested_reps;
1589 bcp->congested_period = congested_period;
1590 }
1340} 1591}
1341 1592
1342/* 1593/*
@@ -1361,10 +1612,11 @@ static int __init uv_bau_init(void)
1361 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), 1612 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
1362 GFP_KERNEL, cpu_to_node(cur_cpu)); 1613 GFP_KERNEL, cpu_to_node(cur_cpu));
1363 1614
1364 uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
1365 uv_nshift = uv_hub_info->m_val; 1615 uv_nshift = uv_hub_info->m_val;
1366 uv_mmask = (1UL << uv_hub_info->m_val) - 1; 1616 uv_mmask = (1UL << uv_hub_info->m_val) - 1;
1367 nuvhubs = uv_num_possible_blades(); 1617 nuvhubs = uv_num_possible_blades();
1618 spin_lock_init(&disable_lock);
1619 congested_cycles = microsec_2_cycles(congested_response_us);
1368 1620
1369 uv_init_per_cpu(nuvhubs); 1621 uv_init_per_cpu(nuvhubs);
1370 1622
@@ -1383,15 +1635,19 @@ static int __init uv_bau_init(void)
1383 alloc_intr_gate(vector, uv_bau_message_intr1); 1635 alloc_intr_gate(vector, uv_bau_message_intr1);
1384 1636
1385 for_each_possible_blade(uvhub) { 1637 for_each_possible_blade(uvhub) {
1386 pnode = uv_blade_to_pnode(uvhub); 1638 if (uv_blade_nr_possible_cpus(uvhub)) {
1387 /* INIT the bau */ 1639 pnode = uv_blade_to_pnode(uvhub);
1388 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, 1640 /* INIT the bau */
1389 ((unsigned long)1 << 63)); 1641 uv_write_global_mmr64(pnode,
1390 mmr = 1; /* should be 1 to broadcast to both sockets */ 1642 UVH_LB_BAU_SB_ACTIVATION_CONTROL,
1391 uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr); 1643 ((unsigned long)1 << 63));
1644 mmr = 1; /* should be 1 to broadcast to both sockets */
1645 uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
1646 mmr);
1647 }
1392 } 1648 }
1393 1649
1394 return 0; 1650 return 0;
1395} 1651}
1396core_initcall(uv_bau_init); 1652core_initcall(uv_bau_init);
1397core_initcall(uv_ptc_init); 1653fs_initcall(uv_ptc_init);
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 4a5979aa6883..2cda60a06e65 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -25,150 +25,172 @@
25 CFI_ADJUST_CFA_OFFSET -4 25 CFI_ADJUST_CFA_OFFSET -4
26.endm 26.endm
27 27
28.macro BEGIN func reg 28#define BEGIN(op) \
29$v = \reg 29.macro endp; \
30 30 CFI_ENDPROC; \
31ENTRY(atomic64_\func\()_386) 31ENDPROC(atomic64_##op##_386); \
32 CFI_STARTPROC 32.purgem endp; \
33 LOCK $v 33.endm; \
34 34ENTRY(atomic64_##op##_386); \
35.macro RETURN 35 CFI_STARTPROC; \
36 UNLOCK $v 36 LOCK v;
37
38#define ENDP endp
39
40#define RET \
41 UNLOCK v; \
37 ret 42 ret
38.endm
39
40.macro END_
41 CFI_ENDPROC
42ENDPROC(atomic64_\func\()_386)
43.purgem RETURN
44.purgem END_
45.purgem END
46.endm
47
48.macro END
49RETURN
50END_
51.endm
52.endm
53 43
54BEGIN read %ecx 44#define RET_ENDP \
55 movl ($v), %eax 45 RET; \
56 movl 4($v), %edx 46 ENDP
57END 47
58 48#define v %ecx
59BEGIN set %esi 49BEGIN(read)
60 movl %ebx, ($v) 50 movl (v), %eax
61 movl %ecx, 4($v) 51 movl 4(v), %edx
62END 52RET_ENDP
63 53#undef v
64BEGIN xchg %esi 54
65 movl ($v), %eax 55#define v %esi
66 movl 4($v), %edx 56BEGIN(set)
67 movl %ebx, ($v) 57 movl %ebx, (v)
68 movl %ecx, 4($v) 58 movl %ecx, 4(v)
69END 59RET_ENDP
70 60#undef v
71BEGIN add %ecx 61
72 addl %eax, ($v) 62#define v %esi
73 adcl %edx, 4($v) 63BEGIN(xchg)
74END 64 movl (v), %eax
75 65 movl 4(v), %edx
76BEGIN add_return %ecx 66 movl %ebx, (v)
77 addl ($v), %eax 67 movl %ecx, 4(v)
78 adcl 4($v), %edx 68RET_ENDP
79 movl %eax, ($v) 69#undef v
80 movl %edx, 4($v) 70
81END 71#define v %ecx
82 72BEGIN(add)
83BEGIN sub %ecx 73 addl %eax, (v)
84 subl %eax, ($v) 74 adcl %edx, 4(v)
85 sbbl %edx, 4($v) 75RET_ENDP
86END 76#undef v
87 77
88BEGIN sub_return %ecx 78#define v %ecx
79BEGIN(add_return)
80 addl (v), %eax
81 adcl 4(v), %edx
82 movl %eax, (v)
83 movl %edx, 4(v)
84RET_ENDP
85#undef v
86
87#define v %ecx
88BEGIN(sub)
89 subl %eax, (v)
90 sbbl %edx, 4(v)
91RET_ENDP
92#undef v
93
94#define v %ecx
95BEGIN(sub_return)
89 negl %edx 96 negl %edx
90 negl %eax 97 negl %eax
91 sbbl $0, %edx 98 sbbl $0, %edx
92 addl ($v), %eax 99 addl (v), %eax
93 adcl 4($v), %edx 100 adcl 4(v), %edx
94 movl %eax, ($v) 101 movl %eax, (v)
95 movl %edx, 4($v) 102 movl %edx, 4(v)
96END 103RET_ENDP
97 104#undef v
98BEGIN inc %esi 105
99 addl $1, ($v) 106#define v %esi
100 adcl $0, 4($v) 107BEGIN(inc)
101END 108 addl $1, (v)
102 109 adcl $0, 4(v)
103BEGIN inc_return %esi 110RET_ENDP
104 movl ($v), %eax 111#undef v
105 movl 4($v), %edx 112
113#define v %esi
114BEGIN(inc_return)
115 movl (v), %eax
116 movl 4(v), %edx
106 addl $1, %eax 117 addl $1, %eax
107 adcl $0, %edx 118 adcl $0, %edx
108 movl %eax, ($v) 119 movl %eax, (v)
109 movl %edx, 4($v) 120 movl %edx, 4(v)
110END 121RET_ENDP
111 122#undef v
112BEGIN dec %esi 123
113 subl $1, ($v) 124#define v %esi
114 sbbl $0, 4($v) 125BEGIN(dec)
115END 126 subl $1, (v)
116 127 sbbl $0, 4(v)
117BEGIN dec_return %esi 128RET_ENDP
118 movl ($v), %eax 129#undef v
119 movl 4($v), %edx 130
131#define v %esi
132BEGIN(dec_return)
133 movl (v), %eax
134 movl 4(v), %edx
120 subl $1, %eax 135 subl $1, %eax
121 sbbl $0, %edx 136 sbbl $0, %edx
122 movl %eax, ($v) 137 movl %eax, (v)
123 movl %edx, 4($v) 138 movl %edx, 4(v)
124END 139RET_ENDP
140#undef v
125 141
126BEGIN add_unless %ecx 142#define v %ecx
143BEGIN(add_unless)
127 addl %eax, %esi 144 addl %eax, %esi
128 adcl %edx, %edi 145 adcl %edx, %edi
129 addl ($v), %eax 146 addl (v), %eax
130 adcl 4($v), %edx 147 adcl 4(v), %edx
131 cmpl %eax, %esi 148 cmpl %eax, %esi
132 je 3f 149 je 3f
1331: 1501:
134 movl %eax, ($v) 151 movl %eax, (v)
135 movl %edx, 4($v) 152 movl %edx, 4(v)
136 movl $1, %eax 153 movl $1, %eax
1372: 1542:
138RETURN 155 RET
1393: 1563:
140 cmpl %edx, %edi 157 cmpl %edx, %edi
141 jne 1b 158 jne 1b
142 xorl %eax, %eax 159 xorl %eax, %eax
143 jmp 2b 160 jmp 2b
144END_ 161ENDP
162#undef v
145 163
146BEGIN inc_not_zero %esi 164#define v %esi
147 movl ($v), %eax 165BEGIN(inc_not_zero)
148 movl 4($v), %edx 166 movl (v), %eax
167 movl 4(v), %edx
149 testl %eax, %eax 168 testl %eax, %eax
150 je 3f 169 je 3f
1511: 1701:
152 addl $1, %eax 171 addl $1, %eax
153 adcl $0, %edx 172 adcl $0, %edx
154 movl %eax, ($v) 173 movl %eax, (v)
155 movl %edx, 4($v) 174 movl %edx, 4(v)
156 movl $1, %eax 175 movl $1, %eax
1572: 1762:
158RETURN 177 RET
1593: 1783:
160 testl %edx, %edx 179 testl %edx, %edx
161 jne 1b 180 jne 1b
162 jmp 2b 181 jmp 2b
163END_ 182ENDP
183#undef v
164 184
165BEGIN dec_if_positive %esi 185#define v %esi
166 movl ($v), %eax 186BEGIN(dec_if_positive)
167 movl 4($v), %edx 187 movl (v), %eax
188 movl 4(v), %edx
168 subl $1, %eax 189 subl $1, %eax
169 sbbl $0, %edx 190 sbbl $0, %edx
170 js 1f 191 js 1f
171 movl %eax, ($v) 192 movl %eax, (v)
172 movl %edx, 4($v) 193 movl %edx, 4(v)
1731: 1941:
174END 195RET_ENDP
196#undef v
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f62777940dfb..4c4508e8a204 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -802,8 +802,10 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
802 up_read(&mm->mmap_sem); 802 up_read(&mm->mmap_sem);
803 803
804 /* Kernel mode? Handle exceptions or die: */ 804 /* Kernel mode? Handle exceptions or die: */
805 if (!(error_code & PF_USER)) 805 if (!(error_code & PF_USER)) {
806 no_context(regs, error_code, address); 806 no_context(regs, error_code, address);
807 return;
808 }
807 809
808 /* User-space => ok to do another page fault: */ 810 /* User-space => ok to do another page fault: */
809 if (is_prefetch(regs, error_code, address)) 811 if (is_prefetch(regs, error_code, address))
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 1ba67dc8006a..f6b48f6c5951 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -668,6 +668,7 @@ static int __init ppro_init(char **cpu_type)
668 *cpu_type = "i386/core_2"; 668 *cpu_type = "i386/core_2";
669 break; 669 break;
670 case 0x1a: 670 case 0x1a:
671 case 0x1e:
671 case 0x2e: 672 case 0x2e:
672 spec = &op_arch_perfmon_spec; 673 spec = &op_arch_perfmon_spec;
673 *cpu_type = "i386/core_i7"; 674 *cpu_type = "i386/core_i7";
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 7715d3242ec8..d3530f6e8115 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -1273,6 +1273,7 @@ static int __devinit c4_probe(struct pci_dev *dev,
1273 if (retval != 0) { 1273 if (retval != 0) {
1274 printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n", 1274 printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
1275 nr, param.port, param.irq, param.membase); 1275 nr, param.port, param.irq, param.membase);
1276 pci_disable_device(dev);
1276 return -ENODEV; 1277 return -ENODEV;
1277 } 1278 }
1278 return 0; 1279 return 0;
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index 5a3f83098018..a79eb5afb92d 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -210,6 +210,7 @@ static int __devinit t1pci_probe(struct pci_dev *dev,
210 if (retval != 0) { 210 if (retval != 0) {
211 printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n", 211 printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
212 param.port, param.irq, param.membase); 212 param.port, param.irq, param.membase);
213 pci_disable_device(dev);
213 return -ENODEV; 214 return -ENODEV;
214 } 215 }
215 return 0; 216 return 0;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index d2dd61d65d51..af25e1f3efd4 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1094,6 +1094,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1094 pr_info("mISDN: do not have informations about adapter at %s\n", 1094 pr_info("mISDN: do not have informations about adapter at %s\n",
1095 pci_name(pdev)); 1095 pci_name(pdev));
1096 kfree(card); 1096 kfree(card);
1097 pci_disable_device(pdev);
1097 return -EINVAL; 1098 return -EINVAL;
1098 } else 1099 } else
1099 pr_notice("mISDN: found adapter %s at %s\n", 1100 pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1103 pci_set_drvdata(pdev, card); 1104 pci_set_drvdata(pdev, card);
1104 err = setup_instance(card); 1105 err = setup_instance(card);
1105 if (err) { 1106 if (err) {
1106 pci_disable_device(card->pdev); 1107 pci_disable_device(pdev);
1107 kfree(card); 1108 kfree(card);
1108 pci_set_drvdata(pdev, NULL); 1109 pci_set_drvdata(pdev, NULL);
1109 } else if (ent->driver_data == INF_SCT_1) { 1110 } else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1114 sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL); 1115 sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
1115 if (!sc) { 1116 if (!sc) {
1116 release_card(card); 1117 release_card(card);
1118 pci_disable_device(pdev);
1117 return -ENOMEM; 1119 return -ENOMEM;
1118 } 1120 }
1119 sc->irq = card->irq; 1121 sc->irq = card->irq;
@@ -1121,6 +1123,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1121 sc->ci = card->ci + i; 1123 sc->ci = card->ci + i;
1122 err = setup_instance(sc); 1124 err = setup_instance(sc);
1123 if (err) { 1125 if (err) {
1126 pci_disable_device(pdev);
1124 kfree(sc); 1127 kfree(sc);
1125 release_card(card); 1128 release_card(card);
1126 break; 1129 break;
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
index 30e04915a256..490c57cc4cfe 100644
--- a/drivers/media/IR/Kconfig
+++ b/drivers/media/IR/Kconfig
@@ -2,14 +2,21 @@ menuconfig IR_CORE
2 tristate "Infrared remote controller adapters" 2 tristate "Infrared remote controller adapters"
3 depends on INPUT 3 depends on INPUT
4 default INPUT 4 default INPUT
5 ---help---
6 Enable support for Remote Controllers on Linux. This is
7 needed in order to support several video capture adapters.
5 8
6if IR_CORE 9 Enable this option if you have a video capture board even
10 if you don't need IR, as otherwise, you may not be able to
11 compile the driver for your adapter.
7 12
8config VIDEO_IR 13config VIDEO_IR
9 tristate 14 tristate
10 depends on IR_CORE 15 depends on IR_CORE
11 default IR_CORE 16 default IR_CORE
12 17
18if IR_CORE
19
13config LIRC 20config LIRC
14 tristate 21 tristate
15 default y 22 default y
diff --git a/drivers/media/dvb/dm1105/Kconfig b/drivers/media/dvb/dm1105/Kconfig
index 695239227cb7..a6ceb08f1183 100644
--- a/drivers/media/dvb/dm1105/Kconfig
+++ b/drivers/media/dvb/dm1105/Kconfig
@@ -9,7 +9,7 @@ config DVB_DM1105
9 select DVB_CX24116 if !DVB_FE_CUSTOMISE 9 select DVB_CX24116 if !DVB_FE_CUSTOMISE
10 select DVB_SI21XX if !DVB_FE_CUSTOMISE 10 select DVB_SI21XX if !DVB_FE_CUSTOMISE
11 select DVB_DS3000 if !DVB_FE_CUSTOMISE 11 select DVB_DS3000 if !DVB_FE_CUSTOMISE
12 select VIDEO_IR 12 depends on VIDEO_IR
13 help 13 help
14 Support for cards based on the SDMC DM1105 PCI chip like 14 Support for cards based on the SDMC DM1105 PCI chip like
15 DvbWorld 2002 15 DvbWorld 2002
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 553b48ac1919..fdc19bba2128 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -1,6 +1,6 @@
1config DVB_USB 1config DVB_USB
2 tristate "Support for various USB DVB devices" 2 tristate "Support for various USB DVB devices"
3 depends on DVB_CORE && USB && I2C && INPUT 3 depends on DVB_CORE && USB && I2C && IR_CORE
4 help 4 help
5 By enabling this you will be able to choose the various supported 5 By enabling this you will be able to choose the various supported
6 USB1.1 and USB2.0 DVB devices. 6 USB1.1 and USB2.0 DVB devices.
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index 85a222c4eaa0..e520bceee0af 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -4,7 +4,7 @@
4 4
5config SMS_SIANO_MDTV 5config SMS_SIANO_MDTV
6 tristate "Siano SMS1xxx based MDTV receiver" 6 tristate "Siano SMS1xxx based MDTV receiver"
7 depends on DVB_CORE && INPUT && HAS_DMA 7 depends on DVB_CORE && IR_CORE && HAS_DMA
8 ---help--- 8 ---help---
9 Choose Y or M here if you have MDTV receiver with a Siano chipset. 9 Choose Y or M here if you have MDTV receiver with a Siano chipset.
10 10
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 32a7ec65ec42..debea8d1d31c 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -98,7 +98,7 @@ config DVB_BUDGET_CI
98 select DVB_LNBP21 if !DVB_FE_CUSTOMISE 98 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
99 select DVB_TDA10023 if !DVB_FE_CUSTOMISE 99 select DVB_TDA10023 if !DVB_FE_CUSTOMISE
100 select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMISE 100 select MEDIA_TUNER_TDA827X if !MEDIA_TUNER_CUSTOMISE
101 select VIDEO_IR 101 depends on VIDEO_IR
102 help 102 help
103 Support for simple SAA7146 based DVB cards 103 Support for simple SAA7146 based DVB cards
104 (so called Budget- or Nova-PCI cards) without onboard 104 (so called Budget- or Nova-PCI cards) without onboard
diff --git a/drivers/media/video/bt8xx/Kconfig b/drivers/media/video/bt8xx/Kconfig
index 3077c45015f5..1a4a89fdf767 100644
--- a/drivers/media/video/bt8xx/Kconfig
+++ b/drivers/media/video/bt8xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_BT848
4 select I2C_ALGOBIT 4 select I2C_ALGOBIT
5 select VIDEO_BTCX 5 select VIDEO_BTCX
6 select VIDEOBUF_DMA_SG 6 select VIDEOBUF_DMA_SG
7 select VIDEO_IR 7 depends on VIDEO_IR
8 select VIDEO_TUNER 8 select VIDEO_TUNER
9 select VIDEO_TVEEPROM 9 select VIDEO_TVEEPROM
10 select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO 10 select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig
index baf7e91ee0f5..76c054d1eef9 100644
--- a/drivers/media/video/cx18/Kconfig
+++ b/drivers/media/video/cx18/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_CX18
3 depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL 3 depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL
4 depends on INPUT # due to VIDEO_IR 4 depends on INPUT # due to VIDEO_IR
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select VIDEO_IR 6 depends on VIDEO_IR
7 select VIDEO_TUNER 7 select VIDEO_TUNER
8 select VIDEO_TVEEPROM 8 select VIDEO_TVEEPROM
9 select VIDEO_CX2341X 9 select VIDEO_CX2341X
diff --git a/drivers/media/video/cx231xx/Kconfig b/drivers/media/video/cx231xx/Kconfig
index 477d4ab5e9ac..5ac7eceececa 100644
--- a/drivers/media/video/cx231xx/Kconfig
+++ b/drivers/media/video/cx231xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_CX231XX
3 depends on VIDEO_DEV && I2C && INPUT 3 depends on VIDEO_DEV && I2C && INPUT
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select VIDEO_TVEEPROM 5 select VIDEO_TVEEPROM
6 select VIDEO_IR 6 depends on VIDEO_IR
7 select VIDEOBUF_VMALLOC 7 select VIDEOBUF_VMALLOC
8 select VIDEO_CX25840 8 select VIDEO_CX25840
9 9
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index 768f000e4b21..e1367b35647a 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -5,7 +5,7 @@ config VIDEO_CX23885
5 select VIDEO_BTCX 5 select VIDEO_BTCX
6 select VIDEO_TUNER 6 select VIDEO_TUNER
7 select VIDEO_TVEEPROM 7 select VIDEO_TVEEPROM
8 select IR_CORE 8 depends on IR_CORE
9 select VIDEOBUF_DVB 9 select VIDEOBUF_DVB
10 select VIDEOBUF_DMA_SG 10 select VIDEOBUF_DMA_SG
11 select VIDEO_CX25840 11 select VIDEO_CX25840
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index c7e5851d3486..99dbae117591 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -6,7 +6,7 @@ config VIDEO_CX88
6 select VIDEOBUF_DMA_SG 6 select VIDEOBUF_DMA_SG
7 select VIDEO_TUNER 7 select VIDEO_TUNER
8 select VIDEO_TVEEPROM 8 select VIDEO_TVEEPROM
9 select VIDEO_IR 9 depends on VIDEO_IR
10 select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO 10 select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO
11 ---help--- 11 ---help---
12 This is a video4linux driver for Conexant 2388x based 12 This is a video4linux driver for Conexant 2388x based
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index c7be0e097828..66aefd6eef55 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_EM28XX
3 depends on VIDEO_DEV && I2C && INPUT 3 depends on VIDEO_DEV && I2C && INPUT
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select VIDEO_TVEEPROM 5 select VIDEO_TVEEPROM
6 select VIDEO_IR 6 depends on VIDEO_IR
7 select VIDEOBUF_VMALLOC 7 select VIDEOBUF_VMALLOC
8 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO 8 select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
9 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO 9 select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index c46bfb1569e3..be4af1fa557e 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_IVTV
3 depends on VIDEO_V4L2 && PCI && I2C 3 depends on VIDEO_V4L2 && PCI && I2C
4 depends on INPUT # due to VIDEO_IR 4 depends on INPUT # due to VIDEO_IR
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select VIDEO_IR 6 depends on VIDEO_IR
7 select VIDEO_TUNER 7 select VIDEO_TUNER
8 select VIDEO_TVEEPROM 8 select VIDEO_TVEEPROM
9 select VIDEO_CX2341X 9 select VIDEO_CX2341X
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 22bfd62c9551..fda005e01670 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_SAA7134
2 tristate "Philips SAA7134 support" 2 tristate "Philips SAA7134 support"
3 depends on VIDEO_DEV && PCI && I2C && INPUT 3 depends on VIDEO_DEV && PCI && I2C && INPUT
4 select VIDEOBUF_DMA_SG 4 select VIDEOBUF_DMA_SG
5 select VIDEO_IR 5 depends on VIDEO_IR
6 select VIDEO_TUNER 6 select VIDEO_TUNER
7 select VIDEO_TVEEPROM 7 select VIDEO_TVEEPROM
8 select CRC32 8 select CRC32
diff --git a/drivers/media/video/tlg2300/Kconfig b/drivers/media/video/tlg2300/Kconfig
index 2c29ec659b4e..1686ebfa6951 100644
--- a/drivers/media/video/tlg2300/Kconfig
+++ b/drivers/media/video/tlg2300/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_TLG2300
3 depends on VIDEO_DEV && I2C && INPUT && SND && DVB_CORE 3 depends on VIDEO_DEV && I2C && INPUT && SND && DVB_CORE
4 select VIDEO_TUNER 4 select VIDEO_TUNER
5 select VIDEO_TVEEPROM 5 select VIDEO_TVEEPROM
6 select VIDEO_IR 6 depends on VIDEO_IR
7 select VIDEOBUF_VMALLOC 7 select VIDEOBUF_VMALLOC
8 select SND_PCM 8 select SND_PCM
9 select VIDEOBUF_DVB 9 select VIDEOBUF_DVB
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 84c1a53a727a..ea8d32cd425d 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/ctype.h> 21#include <linux/ctype.h>
22#include <linux/slab.h>
22#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
23#include <media/v4l2-device.h> 24#include <media/v4l2-device.h>
24#include <media/v4l2-ctrls.h> 25#include <media/v4l2-ctrls.h>
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 077ccf840edf..2111dbfea6fe 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -22,13 +22,13 @@
22#include <net/caif/caif_spi.h> 22#include <net/caif/caif_spi.h>
23 23
24#ifndef CONFIG_CAIF_SPI_SYNC 24#ifndef CONFIG_CAIF_SPI_SYNC
25#define SPI_DATA_POS SPI_CMD_SZ 25#define SPI_DATA_POS 0
26static inline int forward_to_spi_cmd(struct cfspi *cfspi) 26static inline int forward_to_spi_cmd(struct cfspi *cfspi)
27{ 27{
28 return cfspi->rx_cpck_len; 28 return cfspi->rx_cpck_len;
29} 29}
30#else 30#else
31#define SPI_DATA_POS 0 31#define SPI_DATA_POS SPI_CMD_SZ
32static inline int forward_to_spi_cmd(struct cfspi *cfspi) 32static inline int forward_to_spi_cmd(struct cfspi *cfspi)
33{ 33{
34 return 0; 34 return 0;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a527e37728cd..eb799b36c86a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
5menuconfig PHYLIB 5menuconfig PHYLIB
6 tristate "PHY Device support and infrastructure" 6 tristate "PHY Device support and infrastructure"
7 depends on !S390 7 depends on !S390
8 depends on NET_ETHERNET 8 depends on NETDEVICES
9 help 9 help
10 Ethernet controllers are usually attached to PHY 10 Ethernet controllers are usually attached to PHY
11 devices. This option provides infrastructure for 11 devices. This option provides infrastructure for
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5130db8f5c4e..1bb16cb79433 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
301/** 301/**
302 * phy_mii_ioctl - generic PHY MII ioctl interface 302 * phy_mii_ioctl - generic PHY MII ioctl interface
303 * @phydev: the phy_device struct 303 * @phydev: the phy_device struct
304 * @mii_data: MII ioctl data 304 * @ifr: &struct ifreq for socket ioctl's
305 * @cmd: ioctl cmd to execute 305 * @cmd: ioctl cmd to execute
306 * 306 *
307 * Note that this function is currently incompatible with the 307 * Note that this function is currently incompatible with the
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index b9615bd745ea..bf6d87adda4f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -473,48 +473,58 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
473static int 473static int
474qlcnic_init_pci_info(struct qlcnic_adapter *adapter) 474qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475{ 475{
476 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC]; 476 struct qlcnic_pci_info *pci_info;
477 int i, ret = 0, err; 477 int i, ret = 0, err;
478 u8 pfn; 478 u8 pfn;
479 479
480 if (!adapter->npars) 480 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
481 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * 481 if (!pci_info)
482 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
483 if (!adapter->npars)
484 return -ENOMEM; 482 return -ENOMEM;
485 483
486 if (!adapter->eswitch) 484 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
487 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * 485 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
486 if (!adapter->npars) {
487 err = -ENOMEM;
488 goto err_pci_info;
489 }
490
491 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
488 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); 492 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
489 if (!adapter->eswitch) { 493 if (!adapter->eswitch) {
490 err = -ENOMEM; 494 err = -ENOMEM;
491 goto err_eswitch; 495 goto err_npars;
492 } 496 }
493 497
494 ret = qlcnic_get_pci_info(adapter, pci_info); 498 ret = qlcnic_get_pci_info(adapter, pci_info);
495 if (!ret) { 499 if (ret)
496 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 500 goto err_eswitch;
497 pfn = pci_info[i].id;
498 if (pfn > QLCNIC_MAX_PCI_FUNC)
499 return QL_STATUS_INVALID_PARAM;
500 adapter->npars[pfn].active = pci_info[i].active;
501 adapter->npars[pfn].type = pci_info[i].type;
502 adapter->npars[pfn].phy_port = pci_info[i].default_port;
503 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
504 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
505 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
506 }
507
508 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
509 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
510 501
511 return ret; 502 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
503 pfn = pci_info[i].id;
504 if (pfn > QLCNIC_MAX_PCI_FUNC)
505 return QL_STATUS_INVALID_PARAM;
506 adapter->npars[pfn].active = pci_info[i].active;
507 adapter->npars[pfn].type = pci_info[i].type;
508 adapter->npars[pfn].phy_port = pci_info[i].default_port;
509 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
510 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
511 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
512 } 512 }
513 513
514 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
515 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
516
517 kfree(pci_info);
518 return 0;
519
520err_eswitch:
514 kfree(adapter->eswitch); 521 kfree(adapter->eswitch);
515 adapter->eswitch = NULL; 522 adapter->eswitch = NULL;
516err_eswitch: 523err_npars:
517 kfree(adapter->npars); 524 kfree(adapter->npars);
525 adapter->npars = NULL;
526err_pci_info:
527 kfree(pci_info);
518 528
519 return ret; 529 return ret;
520} 530}
@@ -3361,15 +3371,21 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3361 struct device *dev = container_of(kobj, struct device, kobj); 3371 struct device *dev = container_of(kobj, struct device, kobj);
3362 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 3372 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3363 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC]; 3373 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3364 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC]; 3374 struct qlcnic_pci_info *pci_info;
3365 int i, ret; 3375 int i, ret;
3366 3376
3367 if (size != sizeof(pci_cfg)) 3377 if (size != sizeof(pci_cfg))
3368 return QL_STATUS_INVALID_PARAM; 3378 return QL_STATUS_INVALID_PARAM;
3369 3379
3380 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3381 if (!pci_info)
3382 return -ENOMEM;
3383
3370 ret = qlcnic_get_pci_info(adapter, pci_info); 3384 ret = qlcnic_get_pci_info(adapter, pci_info);
3371 if (ret) 3385 if (ret) {
3386 kfree(pci_info);
3372 return ret; 3387 return ret;
3388 }
3373 3389
3374 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) { 3390 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3375 pci_cfg[i].pci_func = pci_info[i].id; 3391 pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3396,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3380 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); 3396 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3381 } 3397 }
3382 memcpy(buf, &pci_cfg, size); 3398 memcpy(buf, &pci_cfg, size);
3399 kfree(pci_info);
3383 return size; 3400 return size;
3384
3385} 3401}
3386static struct bin_attribute bin_attr_npar_config = { 3402static struct bin_attribute bin_attr_npar_config = {
3387 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, 3403 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7f62e2dea28f..ca7fc9df1ccf 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
315 315
316static void rx_complete (struct urb *urb); 316static void rx_complete (struct urb *urb);
317 317
318static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) 318static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
319{ 319{
320 struct sk_buff *skb; 320 struct sk_buff *skb;
321 struct skb_data *entry; 321 struct skb_data *entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
327 netif_dbg(dev, rx_err, dev->net, "no rx skb\n"); 327 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
328 usbnet_defer_kevent (dev, EVENT_RX_MEMORY); 328 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
329 usb_free_urb (urb); 329 usb_free_urb (urb);
330 return; 330 return -ENOMEM;
331 } 331 }
332 skb_reserve (skb, NET_IP_ALIGN); 332 skb_reserve (skb, NET_IP_ALIGN);
333 333
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
357 netif_dbg(dev, ifdown, dev->net, "device gone\n"); 357 netif_dbg(dev, ifdown, dev->net, "device gone\n");
358 netif_device_detach (dev->net); 358 netif_device_detach (dev->net);
359 break; 359 break;
360 case -EHOSTUNREACH:
361 retval = -ENOLINK;
362 break;
360 default: 363 default:
361 netif_dbg(dev, rx_err, dev->net, 364 netif_dbg(dev, rx_err, dev->net,
362 "rx submit, %d\n", retval); 365 "rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
374 dev_kfree_skb_any (skb); 377 dev_kfree_skb_any (skb);
375 usb_free_urb (urb); 378 usb_free_urb (urb);
376 } 379 }
380 return retval;
377} 381}
378 382
379 383
@@ -912,6 +916,7 @@ fail_halt:
912 /* tasklet could resubmit itself forever if memory is tight */ 916 /* tasklet could resubmit itself forever if memory is tight */
913 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) { 917 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
914 struct urb *urb = NULL; 918 struct urb *urb = NULL;
919 int resched = 1;
915 920
916 if (netif_running (dev->net)) 921 if (netif_running (dev->net))
917 urb = usb_alloc_urb (0, GFP_KERNEL); 922 urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
922 status = usb_autopm_get_interface(dev->intf); 927 status = usb_autopm_get_interface(dev->intf);
923 if (status < 0) 928 if (status < 0)
924 goto fail_lowmem; 929 goto fail_lowmem;
925 rx_submit (dev, urb, GFP_KERNEL); 930 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
931 resched = 0;
926 usb_autopm_put_interface(dev->intf); 932 usb_autopm_put_interface(dev->intf);
927fail_lowmem: 933fail_lowmem:
928 tasklet_schedule (&dev->bh); 934 if (resched)
935 tasklet_schedule (&dev->bh);
929 } 936 }
930 } 937 }
931 938
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
1175 // don't refill the queue all at once 1182 // don't refill the queue all at once
1176 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) { 1183 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1177 urb = usb_alloc_urb (0, GFP_ATOMIC); 1184 urb = usb_alloc_urb (0, GFP_ATOMIC);
1178 if (urb != NULL) 1185 if (urb != NULL) {
1179 rx_submit (dev, urb, GFP_ATOMIC); 1186 if (rx_submit (dev, urb, GFP_ATOMIC) ==
1187 -ENOLINK)
1188 return;
1189 }
1180 } 1190 }
1181 if (temp != dev->rxq.qlen) 1191 if (temp != dev->rxq.qlen)
1182 netif_dbg(dev, link, dev->net, 1192 netif_dbg(dev, link, dev->net,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ad7719fe6d0a..e050bd65e037 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
885 * Receive a frame through the DMA 885 * Receive a frame through the DMA
886 */ 886 */
887static inline void 887static inline void
888fst_rx_dma(struct fst_card_info *card, unsigned char *skb, 888fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
889 unsigned char *mem, int len) 889 dma_addr_t mem, int len)
890{ 890{
891 /* 891 /*
892 * This routine will setup the DMA and start it 892 * This routine will setup the DMA and start it
893 */ 893 */
894 894
895 dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len); 895 dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
896 (unsigned long) skb, (unsigned long) mem, len);
896 if (card->dmarx_in_progress) { 897 if (card->dmarx_in_progress) {
897 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n"); 898 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
898 } 899 }
899 900
900 outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */ 901 outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
901 outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */ 902 outl(mem, card->pci_conf + DMALADR0); /* from here */
902 outl(len, card->pci_conf + DMASIZ0); /* for this length */ 903 outl(len, card->pci_conf + DMASIZ0); /* for this length */
903 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */ 904 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
904 905
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
1309 card->dma_port_rx = port; 1310 card->dma_port_rx = port;
1310 card->dma_len_rx = len; 1311 card->dma_len_rx = len;
1311 card->dma_rxpos = rxp; 1312 card->dma_rxpos = rxp;
1312 fst_rx_dma(card, (char *) card->rx_dma_handle_card, 1313 fst_rx_dma(card, card->rx_dma_handle_card,
1313 (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len); 1314 BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
1314 } 1315 }
1315 if (rxp != port->rxpos) { 1316 if (rxp != port->rxpos) {
1316 dbg(DBG_ASS, "About to increment rxpos by more than 1\n"); 1317 dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8848333bc3a9..fec026212326 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -260,7 +260,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
260 .shadow_ram_support = false, 260 .shadow_ram_support = false,
261 .ht_greenfield_support = true, 261 .ht_greenfield_support = true,
262 .led_compensation = 51, 262 .led_compensation = 51,
263 .use_rts_for_ht = true, /* use rts/cts protection */ 263 .use_rts_for_aggregation = true, /* use rts/cts protection */
264 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 264 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
265 .support_ct_kill_exit = true, 265 .support_ct_kill_exit = true,
266 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 266 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a07310fefcf2..6950a783913b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -769,22 +769,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
769 rts_retry_limit = data_retry_limit; 769 rts_retry_limit = data_retry_limit;
770 tx_cmd->rts_retry_limit = rts_retry_limit; 770 tx_cmd->rts_retry_limit = rts_retry_limit;
771 771
772 if (ieee80211_is_mgmt(fc)) {
773 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
774 case cpu_to_le16(IEEE80211_STYPE_AUTH):
775 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
776 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
777 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
778 if (tx_flags & TX_CMD_FLG_RTS_MSK) {
779 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
780 tx_flags |= TX_CMD_FLG_CTS_MSK;
781 }
782 break;
783 default:
784 break;
785 }
786 }
787
788 tx_cmd->rate = rate; 772 tx_cmd->rate = rate;
789 tx_cmd->tx_flags = tx_flags; 773 tx_cmd->tx_flags = tx_flags;
790 774
@@ -2717,7 +2701,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2717static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2701static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2718 .get_hcmd_size = iwl3945_get_hcmd_size, 2702 .get_hcmd_size = iwl3945_get_hcmd_size,
2719 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2703 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2720 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2704 .tx_cmd_protection = iwlcore_tx_cmd_protection,
2721 .request_scan = iwl3945_request_scan, 2705 .request_scan = iwl3945_request_scan,
2722}; 2706};
2723 2707
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d6531ad3906a..d6da356608fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2223,7 +2223,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2223 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2223 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2224 .chain_noise_reset = iwl4965_chain_noise_reset, 2224 .chain_noise_reset = iwl4965_chain_noise_reset,
2225 .gain_computation = iwl4965_gain_computation, 2225 .gain_computation = iwl4965_gain_computation,
2226 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2226 .tx_cmd_protection = iwlcore_tx_cmd_protection,
2227 .calc_rssi = iwl4965_calc_rssi, 2227 .calc_rssi = iwl4965_calc_rssi,
2228 .request_scan = iwlagn_request_scan, 2228 .request_scan = iwlagn_request_scan,
2229}; 2229};
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 8093ce2804fb..aacf3770f075 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -506,7 +506,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
506 .use_bsm = false, 506 .use_bsm = false,
507 .ht_greenfield_support = true, 507 .ht_greenfield_support = true,
508 .led_compensation = 51, 508 .led_compensation = 51,
509 .use_rts_for_ht = true, /* use rts/cts protection */ 509 .use_rts_for_aggregation = true, /* use rts/cts protection */
510 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 510 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
512 .chain_noise_scale = 1000, 512 .chain_noise_scale = 1000,
@@ -537,7 +537,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
537 .use_bsm = false, 537 .use_bsm = false,
538 .ht_greenfield_support = true, 538 .ht_greenfield_support = true,
539 .led_compensation = 51, 539 .led_compensation = 51,
540 .use_rts_for_ht = true, /* use rts/cts protection */ 540 .use_rts_for_aggregation = true, /* use rts/cts protection */
541 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 541 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
542 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 542 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
543 .chain_noise_scale = 1000, 543 .chain_noise_scale = 1000,
@@ -597,7 +597,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
597 .use_bsm = false, 597 .use_bsm = false,
598 .ht_greenfield_support = true, 598 .ht_greenfield_support = true,
599 .led_compensation = 51, 599 .led_compensation = 51,
600 .use_rts_for_ht = true, /* use rts/cts protection */ 600 .use_rts_for_aggregation = true, /* use rts/cts protection */
601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 601 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
602 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 602 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
603 .chain_noise_scale = 1000, 603 .chain_noise_scale = 1000,
@@ -628,7 +628,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
628 .use_bsm = false, 628 .use_bsm = false,
629 .ht_greenfield_support = true, 629 .ht_greenfield_support = true,
630 .led_compensation = 51, 630 .led_compensation = 51,
631 .use_rts_for_ht = true, /* use rts/cts protection */ 631 .use_rts_for_aggregation = true, /* use rts/cts protection */
632 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 632 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
633 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 633 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
634 .chain_noise_scale = 1000, 634 .chain_noise_scale = 1000,
@@ -659,7 +659,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
659 .use_bsm = false, 659 .use_bsm = false,
660 .ht_greenfield_support = true, 660 .ht_greenfield_support = true,
661 .led_compensation = 51, 661 .led_compensation = 51,
662 .use_rts_for_ht = true, /* use rts/cts protection */ 662 .use_rts_for_aggregation = true, /* use rts/cts protection */
663 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 663 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
664 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 664 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
665 .chain_noise_scale = 1000, 665 .chain_noise_scale = 1000,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 58270529a0e4..af4fd50f3405 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -381,7 +381,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
381 .shadow_ram_support = true, 381 .shadow_ram_support = true,
382 .ht_greenfield_support = true, 382 .ht_greenfield_support = true,
383 .led_compensation = 51, 383 .led_compensation = 51,
384 .use_rts_for_ht = true, /* use rts/cts protection */ 384 .use_rts_for_aggregation = true, /* use rts/cts protection */
385 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 385 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
386 .supports_idle = true, 386 .supports_idle = true,
387 .adv_thermal_throttle = true, 387 .adv_thermal_throttle = true,
@@ -489,7 +489,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
489 .shadow_ram_support = true, 489 .shadow_ram_support = true,
490 .ht_greenfield_support = true, 490 .ht_greenfield_support = true,
491 .led_compensation = 51, 491 .led_compensation = 51,
492 .use_rts_for_ht = true, /* use rts/cts protection */ 492 .use_rts_for_aggregation = true, /* use rts/cts protection */
493 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 493 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
494 .supports_idle = true, 494 .supports_idle = true,
495 .adv_thermal_throttle = true, 495 .adv_thermal_throttle = true,
@@ -563,7 +563,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
563 .shadow_ram_support = true, 563 .shadow_ram_support = true,
564 .ht_greenfield_support = true, 564 .ht_greenfield_support = true,
565 .led_compensation = 51, 565 .led_compensation = 51,
566 .use_rts_for_ht = true, /* use rts/cts protection */ 566 .use_rts_for_aggregation = true, /* use rts/cts protection */
567 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 567 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
568 .supports_idle = true, 568 .supports_idle = true,
569 .adv_thermal_throttle = true, 569 .adv_thermal_throttle = true,
@@ -637,7 +637,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
637 .shadow_ram_support = true, 637 .shadow_ram_support = true,
638 .ht_greenfield_support = true, 638 .ht_greenfield_support = true,
639 .led_compensation = 51, 639 .led_compensation = 51,
640 .use_rts_for_ht = true, /* use rts/cts protection */ 640 .use_rts_for_aggregation = true, /* use rts/cts protection */
641 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 641 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
642 .supports_idle = true, 642 .supports_idle = true,
643 .adv_thermal_throttle = true, 643 .adv_thermal_throttle = true,
@@ -714,7 +714,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
714 .shadow_ram_support = true, 714 .shadow_ram_support = true,
715 .ht_greenfield_support = true, 715 .ht_greenfield_support = true,
716 .led_compensation = 51, 716 .led_compensation = 51,
717 .use_rts_for_ht = true, /* use rts/cts protection */ 717 .use_rts_for_aggregation = true, /* use rts/cts protection */
718 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 718 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
719 .supports_idle = true, 719 .supports_idle = true,
720 .adv_thermal_throttle = true, 720 .adv_thermal_throttle = true,
@@ -821,7 +821,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
821 .shadow_ram_support = true, 821 .shadow_ram_support = true,
822 .ht_greenfield_support = true, 822 .ht_greenfield_support = true,
823 .led_compensation = 51, 823 .led_compensation = 51,
824 .use_rts_for_ht = true, /* use rts/cts protection */ 824 .use_rts_for_aggregation = true, /* use rts/cts protection */
825 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 825 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
826 .supports_idle = true, 826 .supports_idle = true,
827 .adv_thermal_throttle = true, 827 .adv_thermal_throttle = true,
@@ -859,7 +859,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
859 .shadow_ram_support = true, 859 .shadow_ram_support = true,
860 .ht_greenfield_support = true, 860 .ht_greenfield_support = true,
861 .led_compensation = 51, 861 .led_compensation = 51,
862 .use_rts_for_ht = true, /* use rts/cts protection */ 862 .use_rts_for_aggregation = true, /* use rts/cts protection */
863 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 863 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
864 .supports_idle = true, 864 .supports_idle = true,
865 .adv_thermal_throttle = true, 865 .adv_thermal_throttle = true,
@@ -933,7 +933,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
933 .shadow_ram_support = true, 933 .shadow_ram_support = true,
934 .ht_greenfield_support = true, 934 .ht_greenfield_support = true,
935 .led_compensation = 51, 935 .led_compensation = 51,
936 .use_rts_for_ht = true, /* use rts/cts protection */ 936 .use_rts_for_aggregation = true, /* use rts/cts protection */
937 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 937 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
938 .supports_idle = true, 938 .supports_idle = true,
939 .adv_thermal_throttle = true, 939 .adv_thermal_throttle = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index a7216dda9786..75b901b3eb1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -211,10 +211,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
211 } 211 }
212} 212}
213 213
214static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 214static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
215 __le32 *tx_flags) 215 struct ieee80211_tx_info *info,
216 __le16 fc, __le32 *tx_flags)
216{ 217{
217 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; 218 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
219 info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
220 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
221 return;
222 }
223
224 if (priv->cfg->use_rts_for_aggregation &&
225 info->flags & IEEE80211_TX_CTL_AMPDU) {
226 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
227 return;
228 }
218} 229}
219 230
220/* Calc max signal level (dBm) among 3 possible receivers */ 231/* Calc max signal level (dBm) among 3 possible receivers */
@@ -268,7 +279,7 @@ struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
268 .build_addsta_hcmd = iwlagn_build_addsta_hcmd, 279 .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
269 .gain_computation = iwlagn_gain_computation, 280 .gain_computation = iwlagn_gain_computation,
270 .chain_noise_reset = iwlagn_chain_noise_reset, 281 .chain_noise_reset = iwlagn_chain_noise_reset,
271 .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag, 282 .tx_cmd_protection = iwlagn_tx_cmd_protection,
272 .calc_rssi = iwlagn_calc_rssi, 283 .calc_rssi = iwlagn_calc_rssi,
273 .request_scan = iwlagn_request_scan, 284 .request_scan = iwlagn_request_scan,
274}; 285};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index d04502d54df3..69155aa448fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -379,10 +379,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
379 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 379 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
380 } 380 }
381 381
382 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); 382 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
383
384 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
385 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
386 383
387 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 384 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
388 if (ieee80211_is_mgmt(fc)) { 385 if (ieee80211_is_mgmt(fc)) {
@@ -456,21 +453,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
456 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) 453 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
457 rate_flags |= RATE_MCS_CCK_MSK; 454 rate_flags |= RATE_MCS_CCK_MSK;
458 455
459 /* Set up RTS and CTS flags for certain packets */
460 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
461 case cpu_to_le16(IEEE80211_STYPE_AUTH):
462 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
463 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
464 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
465 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
466 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
467 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
468 }
469 break;
470 default:
471 break;
472 }
473
474 /* Set up antennas */ 456 /* Set up antennas */
475 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 457 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
476 priv->hw_params.valid_tx_ant); 458 priv->hw_params.valid_tx_ant);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 35337b1e7cac..c1882fd8345d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -202,13 +202,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
202 202
203 priv->start_calib = 0; 203 priv->start_calib = 0;
204 if (new_assoc) { 204 if (new_assoc) {
205 /*
206 * allow CTS-to-self if possible for new association.
207 * this is relevant only for 5000 series and up,
208 * but will not damage 4965
209 */
210 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
211
212 /* Apply the new configuration 205 /* Apply the new configuration
213 * RXON assoc doesn't clear the station table in uCode, 206 * RXON assoc doesn't clear the station table in uCode,
214 */ 207 */
@@ -1618,45 +1611,9 @@ static ssize_t store_tx_power(struct device *d,
1618 1611
1619static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 1612static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
1620 1613
1621static ssize_t show_rts_ht_protection(struct device *d,
1622 struct device_attribute *attr, char *buf)
1623{
1624 struct iwl_priv *priv = dev_get_drvdata(d);
1625
1626 return sprintf(buf, "%s\n",
1627 priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
1628}
1629
1630static ssize_t store_rts_ht_protection(struct device *d,
1631 struct device_attribute *attr,
1632 const char *buf, size_t count)
1633{
1634 struct iwl_priv *priv = dev_get_drvdata(d);
1635 unsigned long val;
1636 int ret;
1637
1638 ret = strict_strtoul(buf, 10, &val);
1639 if (ret)
1640 IWL_INFO(priv, "Input is not in decimal form.\n");
1641 else {
1642 if (!iwl_is_associated(priv))
1643 priv->cfg->use_rts_for_ht = val ? true : false;
1644 else
1645 IWL_ERR(priv, "Sta associated with AP - "
1646 "Change protection mechanism is not allowed\n");
1647 ret = count;
1648 }
1649 return ret;
1650}
1651
1652static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
1653 show_rts_ht_protection, store_rts_ht_protection);
1654
1655
1656static struct attribute *iwl_sysfs_entries[] = { 1614static struct attribute *iwl_sysfs_entries[] = {
1657 &dev_attr_temperature.attr, 1615 &dev_attr_temperature.attr,
1658 &dev_attr_tx_power.attr, 1616 &dev_attr_tx_power.attr,
1659 &dev_attr_rts_ht_protection.attr,
1660#ifdef CONFIG_IWLWIFI_DEBUG 1617#ifdef CONFIG_IWLWIFI_DEBUG
1661 &dev_attr_debug_level.attr, 1618 &dev_attr_debug_level.attr,
1662#endif 1619#endif
@@ -3464,25 +3421,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3464 return ret; 3421 return ret;
3465} 3422}
3466 3423
3467/*
3468 * switch to RTS/CTS for TX
3469 */
3470static void iwl_enable_rts_cts(struct iwl_priv *priv)
3471{
3472
3473 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3474 return;
3475
3476 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
3477 if (!test_bit(STATUS_SCANNING, &priv->status)) {
3478 IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
3479 iwlcore_commit_rxon(priv);
3480 } else {
3481 /* scanning, defer the request until scan completed */
3482 IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
3483 }
3484}
3485
3486static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 3424static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3487 struct ieee80211_vif *vif, 3425 struct ieee80211_vif *vif,
3488 enum ieee80211_ampdu_mlme_action action, 3426 enum ieee80211_ampdu_mlme_action action,
@@ -3529,14 +3467,33 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3529 } 3467 }
3530 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3468 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3531 ret = 0; 3469 ret = 0;
3470 if (priv->cfg->use_rts_for_aggregation) {
3471 struct iwl_station_priv *sta_priv =
3472 (void *) sta->drv_priv;
3473 /*
3474 * switch off RTS/CTS if it was previously enabled
3475 */
3476
3477 sta_priv->lq_sta.lq.general_params.flags &=
3478 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3479 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
3480 CMD_ASYNC, false);
3481 }
3532 break; 3482 break;
3533 case IEEE80211_AMPDU_TX_OPERATIONAL: 3483 case IEEE80211_AMPDU_TX_OPERATIONAL:
3534 if (priv->cfg->use_rts_for_ht) { 3484 if (priv->cfg->use_rts_for_aggregation) {
3485 struct iwl_station_priv *sta_priv =
3486 (void *) sta->drv_priv;
3487
3535 /* 3488 /*
3536 * switch to RTS/CTS if it is the prefer protection 3489 * switch to RTS/CTS if it is the prefer protection
3537 * method for HT traffic 3490 * method for HT traffic
3538 */ 3491 */
3539 iwl_enable_rts_cts(priv); 3492
3493 sta_priv->lq_sta.lq.general_params.flags |=
3494 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3495 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
3496 CMD_ASYNC, false);
3540 } 3497 }
3541 ret = 0; 3498 ret = 0;
3542 break; 3499 break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 8ccb6d205b6d..2c03c6e20a72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -401,21 +401,38 @@ void iwlcore_free_geos(struct iwl_priv *priv)
401EXPORT_SYMBOL(iwlcore_free_geos); 401EXPORT_SYMBOL(iwlcore_free_geos);
402 402
403/* 403/*
404 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this 404 * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
405 * function. 405 * function.
406 */ 406 */
407void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 407void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
408 __le32 *tx_flags) 408 struct ieee80211_tx_info *info,
409 __le16 fc, __le32 *tx_flags)
409{ 410{
410 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { 411 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
411 *tx_flags |= TX_CMD_FLG_RTS_MSK; 412 *tx_flags |= TX_CMD_FLG_RTS_MSK;
412 *tx_flags &= ~TX_CMD_FLG_CTS_MSK; 413 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
414 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
415
416 if (!ieee80211_is_mgmt(fc))
417 return;
418
419 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
420 case cpu_to_le16(IEEE80211_STYPE_AUTH):
421 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
422 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
423 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
424 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
425 *tx_flags |= TX_CMD_FLG_CTS_MSK;
426 break;
427 }
413 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 428 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
414 *tx_flags &= ~TX_CMD_FLG_RTS_MSK; 429 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
415 *tx_flags |= TX_CMD_FLG_CTS_MSK; 430 *tx_flags |= TX_CMD_FLG_CTS_MSK;
431 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
416 } 432 }
417} 433}
418EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag); 434EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
435
419 436
420static bool is_single_rx_stream(struct iwl_priv *priv) 437static bool is_single_rx_stream(struct iwl_priv *priv)
421{ 438{
@@ -1869,6 +1886,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1869 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; 1886 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
1870 else 1887 else
1871 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 1888 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1889 if (bss_conf->use_cts_prot)
1890 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
1891 else
1892 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
1872 } 1893 }
1873 1894
1874 if (changes & BSS_CHANGED_BASIC_RATES) { 1895 if (changes & BSS_CHANGED_BASIC_RATES) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index e9d23f2f869d..4a71dfb10a15 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -104,8 +104,9 @@ struct iwl_hcmd_utils_ops {
104 u32 min_average_noise, 104 u32 min_average_noise,
105 u8 default_chain); 105 u8 default_chain);
106 void (*chain_noise_reset)(struct iwl_priv *priv); 106 void (*chain_noise_reset)(struct iwl_priv *priv);
107 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, 107 void (*tx_cmd_protection)(struct iwl_priv *priv,
108 __le32 *tx_flags); 108 struct ieee80211_tx_info *info,
109 __le16 fc, __le32 *tx_flags);
109 int (*calc_rssi)(struct iwl_priv *priv, 110 int (*calc_rssi)(struct iwl_priv *priv,
110 struct iwl_rx_phy_res *rx_resp); 111 struct iwl_rx_phy_res *rx_resp);
111 void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); 112 void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
@@ -249,7 +250,7 @@ struct iwl_mod_params {
249 * @led_compensation: compensate on the led on/off time per HW according 250 * @led_compensation: compensate on the led on/off time per HW according
250 * to the deviation to achieve the desired led frequency. 251 * to the deviation to achieve the desired led frequency.
251 * The detail algorithm is described in iwl-led.c 252 * The detail algorithm is described in iwl-led.c
252 * @use_rts_for_ht: use rts/cts protection for HT traffic 253 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
253 * @chain_noise_num_beacons: number of beacons used to compute chain noise 254 * @chain_noise_num_beacons: number of beacons used to compute chain noise
254 * @adv_thermal_throttle: support advance thermal throttle 255 * @adv_thermal_throttle: support advance thermal throttle
255 * @support_ct_kill_exit: support ct kill exit condition 256 * @support_ct_kill_exit: support ct kill exit condition
@@ -318,7 +319,7 @@ struct iwl_cfg {
318 const bool ht_greenfield_support; 319 const bool ht_greenfield_support;
319 u16 led_compensation; 320 u16 led_compensation;
320 const bool broken_powersave; 321 const bool broken_powersave;
321 bool use_rts_for_ht; 322 bool use_rts_for_aggregation;
322 int chain_noise_num_beacons; 323 int chain_noise_num_beacons;
323 const bool supports_idle; 324 const bool supports_idle;
324 bool adv_thermal_throttle; 325 bool adv_thermal_throttle;
@@ -390,8 +391,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
390void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 391void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
391int iwl_alloc_txq_mem(struct iwl_priv *priv); 392int iwl_alloc_txq_mem(struct iwl_priv *priv);
392void iwl_free_txq_mem(struct iwl_priv *priv); 393void iwl_free_txq_mem(struct iwl_priv *priv);
393void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 394void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
394 __le32 *tx_flags); 395 struct ieee80211_tx_info *info,
396 __le16 fc, __le32 *tx_flags);
395#ifdef CONFIG_IWLWIFI_DEBUGFS 397#ifdef CONFIG_IWLWIFI_DEBUGFS
396int iwl_alloc_traffic_mem(struct iwl_priv *priv); 398int iwl_alloc_traffic_mem(struct iwl_priv *priv);
397void iwl_free_traffic_mem(struct iwl_priv *priv); 399void iwl_free_traffic_mem(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d24eb47d3705..70c4b8fba0ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -435,10 +435,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
435 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 435 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
436 } 436 }
437 437
438 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); 438 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
439
440 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
441 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
442 439
443 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 440 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
444 if (ieee80211_is_mgmt(fc)) { 441 if (ieee80211_is_mgmt(fc)) {
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2372abb29c2e..3e82f1627209 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -9,6 +9,7 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sched.h>
12#include <linux/ieee80211.h> 13#include <linux/ieee80211.h>
13#include <net/cfg80211.h> 14#include <net/cfg80211.h>
14#include <asm/unaligned.h> 15#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 71a101fb2e4e..822f8dc26e9c 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -43,8 +43,6 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
43 { PCI_DEVICE(0x1260, 0x3886) }, 43 { PCI_DEVICE(0x1260, 0x3886) },
44 /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ 44 /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
45 { PCI_DEVICE(0x1260, 0xffff) }, 45 { PCI_DEVICE(0x1260, 0xffff) },
46 /* Standard Microsystems Corp SMC2802W Wireless PCI */
47 { PCI_DEVICE(0x10b8, 0x2802) },
48 { }, 46 { },
49}; 47};
50 48
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a75ed3083a6a..8e4153d740f3 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
386 struct chbk *p_ch; 386 struct chbk *p_ch;
387 387
388 CLAW_DBF_TEXT(4, trace, "claw_tx"); 388 CLAW_DBF_TEXT(4, trace, "claw_tx");
389 p_ch=&privptr->channel[WRITE]; 389 p_ch = &privptr->channel[WRITE_CHANNEL];
390 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); 390 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
391 rc=claw_hw_tx( skb, dev, 1 ); 391 rc=claw_hw_tx( skb, dev, 1 );
392 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); 392 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@ static struct sk_buff *
407claw_pack_skb(struct claw_privbk *privptr) 407claw_pack_skb(struct claw_privbk *privptr)
408{ 408{
409 struct sk_buff *new_skb,*held_skb; 409 struct sk_buff *new_skb,*held_skb;
410 struct chbk *p_ch = &privptr->channel[WRITE]; 410 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
411 struct claw_env *p_env = privptr->p_env; 411 struct claw_env *p_env = privptr->p_env;
412 int pkt_cnt,pk_ind,so_far; 412 int pkt_cnt,pk_ind,so_far;
413 413
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
515 privptr->p_env->write_size=CLAW_FRAME_SIZE; 515 privptr->p_env->write_size=CLAW_FRAME_SIZE;
516 } 516 }
517 claw_set_busy(dev); 517 claw_set_busy(dev);
518 tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, 518 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
519 (unsigned long) &privptr->channel[READ]); 519 (unsigned long) &privptr->channel[READ_CHANNEL]);
520 for ( i = 0; i < 2; i++) { 520 for ( i = 0; i < 2; i++) {
521 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); 521 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
522 init_waitqueue_head(&privptr->channel[i].wait); 522 init_waitqueue_head(&privptr->channel[i].wait);
523 /* skb_queue_head_init(&p_ch->io_queue); */ 523 /* skb_queue_head_init(&p_ch->io_queue); */
524 if (i == WRITE) 524 if (i == WRITE_CHANNEL)
525 skb_queue_head_init( 525 skb_queue_head_init(
526 &privptr->channel[WRITE].collect_queue); 526 &privptr->channel[WRITE_CHANNEL].collect_queue);
527 privptr->channel[i].flag_a = 0; 527 privptr->channel[i].flag_a = 0;
528 privptr->channel[i].IO_active = 0; 528 privptr->channel[i].IO_active = 0;
529 privptr->channel[i].flag &= ~CLAW_TIMER; 529 privptr->channel[i].flag &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
551 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) 551 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
552 del_timer(&timer); 552 del_timer(&timer);
553 } 553 }
554 if ((((privptr->channel[READ].last_dstat | 554 if ((((privptr->channel[READ_CHANNEL].last_dstat |
555 privptr->channel[WRITE].last_dstat) & 555 privptr->channel[WRITE_CHANNEL].last_dstat) &
556 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || 556 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
557 (((privptr->channel[READ].flag | 557 (((privptr->channel[READ_CHANNEL].flag |
558 privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { 558 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
559 dev_info(&privptr->channel[READ].cdev->dev, 559 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
560 "%s: remote side is not ready\n", dev->name); 560 "%s: remote side is not ready\n", dev->name);
561 CLAW_DBF_TEXT(2, trace, "notrdy"); 561 CLAW_DBF_TEXT(2, trace, "notrdy");
562 562
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
608 } 608 }
609 } 609 }
610 privptr->buffs_alloc = 0; 610 privptr->buffs_alloc = 0;
611 privptr->channel[READ].flag= 0x00; 611 privptr->channel[READ_CHANNEL].flag = 0x00;
612 privptr->channel[WRITE].flag = 0x00; 612 privptr->channel[WRITE_CHANNEL].flag = 0x00;
613 privptr->p_buff_ccw=NULL; 613 privptr->p_buff_ccw=NULL;
614 privptr->p_buff_read=NULL; 614 privptr->p_buff_read=NULL;
615 privptr->p_buff_write=NULL; 615 privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
652 } 652 }
653 653
654 /* Try to extract channel from driver data. */ 654 /* Try to extract channel from driver data. */
655 if (privptr->channel[READ].cdev == cdev) 655 if (privptr->channel[READ_CHANNEL].cdev == cdev)
656 p_ch = &privptr->channel[READ]; 656 p_ch = &privptr->channel[READ_CHANNEL];
657 else if (privptr->channel[WRITE].cdev == cdev) 657 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
658 p_ch = &privptr->channel[WRITE]; 658 p_ch = &privptr->channel[WRITE_CHANNEL];
659 else { 659 else {
660 dev_warn(&cdev->dev, "The device is not a CLAW device\n"); 660 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
661 CLAW_DBF_TEXT(2, trace, "badchan"); 661 CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
813 claw_clearbit_busy(TB_TX, dev); 813 claw_clearbit_busy(TB_TX, dev);
814 claw_clear_busy(dev); 814 claw_clear_busy(dev);
815 } 815 }
816 p_ch_r = (struct chbk *)&privptr->channel[READ]; 816 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
817 if (test_and_set_bit(CLAW_BH_ACTIVE, 817 if (test_and_set_bit(CLAW_BH_ACTIVE,
818 (void *)&p_ch_r->flag_a) == 0) 818 (void *)&p_ch_r->flag_a) == 0)
819 tasklet_schedule(&p_ch_r->tasklet); 819 tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
878 for ( i = 1; i >=0 ; i--) { 878 for ( i = 1; i >=0 ; i--) {
879 spin_lock_irqsave( 879 spin_lock_irqsave(
880 get_ccwdev_lock(privptr->channel[i].cdev), saveflags); 880 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
881 /* del_timer(&privptr->channel[READ].timer); */ 881 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
882 privptr->channel[i].claw_state = CLAW_STOP; 882 privptr->channel[i].claw_state = CLAW_STOP;
883 privptr->channel[i].IO_active = 0; 883 privptr->channel[i].IO_active = 0;
884 parm = (unsigned long) &privptr->channel[i]; 884 parm = (unsigned long) &privptr->channel[i];
885 if (i == WRITE) 885 if (i == WRITE_CHANNEL)
886 claw_purge_skb_queue( 886 claw_purge_skb_queue(
887 &privptr->channel[WRITE].collect_queue); 887 &privptr->channel[WRITE_CHANNEL].collect_queue);
888 rc = ccw_device_halt (privptr->channel[i].cdev, parm); 888 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
889 if (privptr->system_validate_comp==0x00) /* never opened? */ 889 if (privptr->system_validate_comp==0x00) /* never opened? */
890 init_waitqueue_head(&privptr->channel[i].wait); 890 init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
971 privptr->mtc_skipping = 1; 971 privptr->mtc_skipping = 1;
972 privptr->mtc_offset=0; 972 privptr->mtc_offset=0;
973 973
974 if (((privptr->channel[READ].last_dstat | 974 if (((privptr->channel[READ_CHANNEL].last_dstat |
975 privptr->channel[WRITE].last_dstat) & 975 privptr->channel[WRITE_CHANNEL].last_dstat) &
976 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { 976 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
977 dev_warn(&privptr->channel[READ].cdev->dev, 977 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
978 "Deactivating %s completed with incorrect" 978 "Deactivating %s completed with incorrect"
979 " subchannel status " 979 " subchannel status "
980 "(read %02x, write %02x)\n", 980 "(read %02x, write %02x)\n",
981 dev->name, 981 dev->name,
982 privptr->channel[READ].last_dstat, 982 privptr->channel[READ_CHANNEL].last_dstat,
983 privptr->channel[WRITE].last_dstat); 983 privptr->channel[WRITE_CHANNEL].last_dstat);
984 CLAW_DBF_TEXT(2, trace, "badclose"); 984 CLAW_DBF_TEXT(2, trace, "badclose");
985 } 985 }
986 CLAW_DBF_TEXT(4, trace, "rlsexit"); 986 CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1324 1324
1325 CLAW_DBF_TEXT(4, trace, "hw_tx"); 1325 CLAW_DBF_TEXT(4, trace, "hw_tx");
1326 privptr = (struct claw_privbk *)(dev->ml_priv); 1326 privptr = (struct claw_privbk *)(dev->ml_priv);
1327 p_ch=(struct chbk *)&privptr->channel[WRITE]; 1327 p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
1328 p_env =privptr->p_env; 1328 p_env =privptr->p_env;
1329 claw_free_wrt_buf(dev); /* Clean up free chain if posible */ 1329 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
1330 /* scan the write queue to free any completed write packets */ 1330 /* scan the write queue to free any completed write packets */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1357 claw_strt_out_IO(dev ); 1357 claw_strt_out_IO(dev );
1358 claw_free_wrt_buf( dev ); 1358 claw_free_wrt_buf( dev );
1359 if (privptr->write_free_count==0) { 1359 if (privptr->write_free_count==0) {
1360 ch = &privptr->channel[WRITE]; 1360 ch = &privptr->channel[WRITE_CHANNEL];
1361 atomic_inc(&skb->users); 1361 atomic_inc(&skb->users);
1362 skb_queue_tail(&ch->collect_queue, skb); 1362 skb_queue_tail(&ch->collect_queue, skb);
1363 goto Done; 1363 goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1369 } 1369 }
1370 /* tx lock */ 1370 /* tx lock */
1371 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ 1371 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1372 ch = &privptr->channel[WRITE]; 1372 ch = &privptr->channel[WRITE_CHANNEL];
1373 atomic_inc(&skb->users); 1373 atomic_inc(&skb->users);
1374 skb_queue_tail(&ch->collect_queue, skb); 1374 skb_queue_tail(&ch->collect_queue, skb);
1375 claw_strt_out_IO(dev ); 1375 claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1385 privptr->p_write_free_chain == NULL ) { 1385 privptr->p_write_free_chain == NULL ) {
1386 1386
1387 claw_setbit_busy(TB_NOBUFFER,dev); 1387 claw_setbit_busy(TB_NOBUFFER,dev);
1388 ch = &privptr->channel[WRITE]; 1388 ch = &privptr->channel[WRITE_CHANNEL];
1389 atomic_inc(&skb->users); 1389 atomic_inc(&skb->users);
1390 skb_queue_tail(&ch->collect_queue, skb); 1390 skb_queue_tail(&ch->collect_queue, skb);
1391 CLAW_DBF_TEXT(2, trace, "clawbusy"); 1391 CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1397 while (len_of_data > 0) { 1397 while (len_of_data > 0) {
1398 p_this_ccw=privptr->p_write_free_chain; /* get a block */ 1398 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1399 if (p_this_ccw == NULL) { /* lost the race */ 1399 if (p_this_ccw == NULL) { /* lost the race */
1400 ch = &privptr->channel[WRITE]; 1400 ch = &privptr->channel[WRITE_CHANNEL];
1401 atomic_inc(&skb->users); 1401 atomic_inc(&skb->users);
1402 skb_queue_tail(&ch->collect_queue, skb); 1402 skb_queue_tail(&ch->collect_queue, skb);
1403 goto Done2; 1403 goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2067 *catch up to each other */ 2067 *catch up to each other */
2068 privptr = dev->ml_priv; 2068 privptr = dev->ml_priv;
2069 p_env=privptr->p_env; 2069 p_env=privptr->p_env;
2070 tdev = &privptr->channel[READ].cdev->dev; 2070 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
2071 memcpy( &temp_host_name, p_env->host_name, 8); 2071 memcpy( &temp_host_name, p_env->host_name, 8);
2072 memcpy( &temp_ws_name, p_env->adapter_name , 8); 2072 memcpy( &temp_ws_name, p_env->adapter_name , 8);
2073 dev_info(tdev, "%s: CLAW device %.8s: " 2073 dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2245 dev->name, temp_ws_name, 2245 dev->name, temp_ws_name,
2246 p_ctlbk->linkid); 2246 p_ctlbk->linkid);
2247 privptr->active_link_ID = p_ctlbk->linkid; 2247 privptr->active_link_ID = p_ctlbk->linkid;
2248 p_ch = &privptr->channel[WRITE]; 2248 p_ch = &privptr->channel[WRITE_CHANNEL];
2249 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ 2249 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2250 break; 2250 break;
2251 case CONNECTION_RESPONSE: 2251 case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
2296 "%s: Confirmed Now packing\n", dev->name); 2296 "%s: Confirmed Now packing\n", dev->name);
2297 p_env->packing = DO_PACKED; 2297 p_env->packing = DO_PACKED;
2298 } 2298 }
2299 p_ch = &privptr->channel[WRITE]; 2299 p_ch = &privptr->channel[WRITE_CHANNEL];
2300 wake_up(&p_ch->wait); 2300 wake_up(&p_ch->wait);
2301 } else { 2301 } else {
2302 dev_warn(tdev, "Activating %s failed because of" 2302 dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
2556 p_packd=NULL; 2556 p_packd=NULL;
2557 privptr = dev->ml_priv; 2557 privptr = dev->ml_priv;
2558 2558
2559 p_dev = &privptr->channel[READ].cdev->dev; 2559 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2560 p_env = privptr->p_env; 2560 p_env = privptr->p_env;
2561 p_this_ccw=privptr->p_read_active_first; 2561 p_this_ccw=privptr->p_read_active_first;
2562 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { 2562 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
2728 struct ccwbk*p_ccwbk; 2728 struct ccwbk*p_ccwbk;
2729 struct chbk *p_ch; 2729 struct chbk *p_ch;
2730 struct clawh *p_clawh; 2730 struct clawh *p_clawh;
2731 p_ch=&privptr->channel[READ]; 2731 p_ch = &privptr->channel[READ_CHANNEL];
2732 2732
2733 CLAW_DBF_TEXT(4, trace, "StRdNter"); 2733 CLAW_DBF_TEXT(4, trace, "StRdNter");
2734 p_clawh=(struct clawh *)privptr->p_claw_signal_blk; 2734 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
2782 return; 2782 return;
2783 } 2783 }
2784 privptr = (struct claw_privbk *)dev->ml_priv; 2784 privptr = (struct claw_privbk *)dev->ml_priv;
2785 p_ch=&privptr->channel[WRITE]; 2785 p_ch = &privptr->channel[WRITE_CHANNEL];
2786 2786
2787 CLAW_DBF_TEXT(4, trace, "strt_io"); 2787 CLAW_DBF_TEXT(4, trace, "strt_io");
2788 p_first_ccw=privptr->p_write_active_first; 2788 p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
2875 if (dev->flags & IFF_RUNNING) 2875 if (dev->flags & IFF_RUNNING)
2876 claw_release(dev); 2876 claw_release(dev);
2877 if (privptr) { 2877 if (privptr) {
2878 privptr->channel[READ].ndev = NULL; /* say it's free */ 2878 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2879 } 2879 }
2880 dev->ml_priv = NULL; 2880 dev->ml_priv = NULL;
2881#ifdef MODULE 2881#ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
2960 struct ccw_dev_id dev_id; 2960 struct ccw_dev_id dev_id;
2961 2961
2962 dev_info(&cgdev->dev, "add for %s\n", 2962 dev_info(&cgdev->dev, "add for %s\n",
2963 dev_name(&cgdev->cdev[READ]->dev)); 2963 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2964 CLAW_DBF_TEXT(2, setup, "new_dev"); 2964 CLAW_DBF_TEXT(2, setup, "new_dev");
2965 privptr = dev_get_drvdata(&cgdev->dev); 2965 privptr = dev_get_drvdata(&cgdev->dev);
2966 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); 2966 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2967 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); 2967 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2968 if (!privptr) 2968 if (!privptr)
2969 return -ENODEV; 2969 return -ENODEV;
2970 p_env = privptr->p_env; 2970 p_env = privptr->p_env;
2971 ccw_device_get_id(cgdev->cdev[READ], &dev_id); 2971 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2972 p_env->devno[READ] = dev_id.devno; 2972 p_env->devno[READ_CHANNEL] = dev_id.devno;
2973 ccw_device_get_id(cgdev->cdev[WRITE], &dev_id); 2973 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2974 p_env->devno[WRITE] = dev_id.devno; 2974 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2975 ret = add_channel(cgdev->cdev[0],0,privptr); 2975 ret = add_channel(cgdev->cdev[0],0,privptr);
2976 if (ret == 0) 2976 if (ret == 0)
2977 ret = add_channel(cgdev->cdev[1],1,privptr); 2977 ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
2980 " failed with error code %d\n", ret); 2980 " failed with error code %d\n", ret);
2981 goto out; 2981 goto out;
2982 } 2982 }
2983 ret = ccw_device_set_online(cgdev->cdev[READ]); 2983 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2984 if (ret != 0) { 2984 if (ret != 0) {
2985 dev_warn(&cgdev->dev, 2985 dev_warn(&cgdev->dev,
2986 "Setting the read subchannel online" 2986 "Setting the read subchannel online"
2987 " failed with error code %d\n", ret); 2987 " failed with error code %d\n", ret);
2988 goto out; 2988 goto out;
2989 } 2989 }
2990 ret = ccw_device_set_online(cgdev->cdev[WRITE]); 2990 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2991 if (ret != 0) { 2991 if (ret != 0) {
2992 dev_warn(&cgdev->dev, 2992 dev_warn(&cgdev->dev,
2993 "Setting the write subchannel online " 2993 "Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
3002 } 3002 }
3003 dev->ml_priv = privptr; 3003 dev->ml_priv = privptr;
3004 dev_set_drvdata(&cgdev->dev, privptr); 3004 dev_set_drvdata(&cgdev->dev, privptr);
3005 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr); 3005 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
3006 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr); 3006 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
3007 /* sysfs magic */ 3007 /* sysfs magic */
3008 SET_NETDEV_DEV(dev, &cgdev->dev); 3008 SET_NETDEV_DEV(dev, &cgdev->dev);
3009 if (register_netdev(dev) != 0) { 3009 if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
3021 goto out; 3021 goto out;
3022 } 3022 }
3023 } 3023 }
3024 privptr->channel[READ].ndev = dev; 3024 privptr->channel[READ_CHANNEL].ndev = dev;
3025 privptr->channel[WRITE].ndev = dev; 3025 privptr->channel[WRITE_CHANNEL].ndev = dev;
3026 privptr->p_env->ndev = dev; 3026 privptr->p_env->ndev = dev;
3027 3027
3028 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d " 3028 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
3029 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", 3029 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
3030 dev->name, p_env->read_size, 3030 dev->name, p_env->read_size,
3031 p_env->write_size, p_env->read_buffers, 3031 p_env->write_size, p_env->read_buffers,
3032 p_env->write_buffers, p_env->devno[READ], 3032 p_env->write_buffers, p_env->devno[READ_CHANNEL],
3033 p_env->devno[WRITE]); 3033 p_env->devno[WRITE_CHANNEL]);
3034 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name " 3034 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
3035 ":%.8s api_type: %.8s\n", 3035 ":%.8s api_type: %.8s\n",
3036 dev->name, p_env->host_name, 3036 dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3072 priv = dev_get_drvdata(&cgdev->dev); 3072 priv = dev_get_drvdata(&cgdev->dev);
3073 if (!priv) 3073 if (!priv)
3074 return -ENODEV; 3074 return -ENODEV;
3075 ndev = priv->channel[READ].ndev; 3075 ndev = priv->channel[READ_CHANNEL].ndev;
3076 if (ndev) { 3076 if (ndev) {
3077 /* Close the device */ 3077 /* Close the device */
3078 dev_info(&cgdev->dev, "%s: shutting down \n", 3078 dev_info(&cgdev->dev, "%s: shutting down\n",
3079 ndev->name); 3079 ndev->name);
3080 if (ndev->flags & IFF_RUNNING) 3080 if (ndev->flags & IFF_RUNNING)
3081 ret = claw_release(ndev); 3081 ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3083 unregister_netdev(ndev); 3083 unregister_netdev(ndev);
3084 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ 3084 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3085 claw_free_netdevice(ndev, 1); 3085 claw_free_netdevice(ndev, 1);
3086 priv->channel[READ].ndev = NULL; 3086 priv->channel[READ_CHANNEL].ndev = NULL;
3087 priv->channel[WRITE].ndev = NULL; 3087 priv->channel[WRITE_CHANNEL].ndev = NULL;
3088 priv->p_env->ndev = NULL; 3088 priv->p_env->ndev = NULL;
3089 } 3089 }
3090 ccw_device_set_offline(cgdev->cdev[1]); 3090 ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3115 priv->channel[1].irb=NULL; 3115 priv->channel[1].irb=NULL;
3116 kfree(priv); 3116 kfree(priv);
3117 dev_set_drvdata(&cgdev->dev, NULL); 3117 dev_set_drvdata(&cgdev->dev, NULL);
3118 dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL); 3118 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3119 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL); 3119 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3120 put_device(&cgdev->dev); 3120 put_device(&cgdev->dev);
3121 3121
3122 return; 3122 return;
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 46d59a13db12..1bc5904df19f 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -74,8 +74,8 @@
74#define MAX_ENVELOPE_SIZE 65536 74#define MAX_ENVELOPE_SIZE 65536
75#define CLAW_DEFAULT_MTU_SIZE 4096 75#define CLAW_DEFAULT_MTU_SIZE 4096
76#define DEF_PACK_BUFSIZE 32768 76#define DEF_PACK_BUFSIZE 32768
77#define READ 0 77#define READ_CHANNEL 0
78#define WRITE 1 78#define WRITE_CHANNEL 1
79 79
80#define TB_TX 0 /* sk buffer handling in process */ 80#define TB_TX 0 /* sk buffer handling in process */
81#define TB_STOP 1 /* network device stop in process */ 81#define TB_STOP 1 /* network device stop in process */
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 70eb7f138414..8c921fc3511a 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
454 if ((fsmstate == CTC_STATE_SETUPWAIT) && 454 if ((fsmstate == CTC_STATE_SETUPWAIT) &&
455 (ch->protocol == CTCM_PROTO_OS390)) { 455 (ch->protocol == CTCM_PROTO_OS390)) {
456 /* OS/390 resp. z/OS */ 456 /* OS/390 resp. z/OS */
457 if (CHANNEL_DIRECTION(ch->flags) == READ) { 457 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
458 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; 458 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
459 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, 459 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
460 CTC_EVENT_TIMER, ch); 460 CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
472 * if in compatibility mode, since VM TCP delays the initial 472 * if in compatibility mode, since VM TCP delays the initial
473 * frame until it has some data to send. 473 * frame until it has some data to send.
474 */ 474 */
475 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) || 475 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
476 (ch->protocol != CTCM_PROTO_S390)) 476 (ch->protocol != CTCM_PROTO_S390))
477 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 477 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
478 478
479 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; 479 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
480 ch->ccw[1].count = 2; /* Transfer only length */ 480 ch->ccw[1].count = 2; /* Transfer only length */
481 481
482 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) 482 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
483 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); 483 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
484 rc = ccw_device_start(ch->cdev, &ch->ccw[0], 484 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
485 (unsigned long)ch, 0xff, 0); 485 (unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
495 * reply from VM TCP which brings up the RX channel to it's 495 * reply from VM TCP which brings up the RX channel to it's
496 * final state. 496 * final state.
497 */ 497 */
498 if ((CHANNEL_DIRECTION(ch->flags) == READ) && 498 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
499 (ch->protocol == CTCM_PROTO_S390)) { 499 (ch->protocol == CTCM_PROTO_S390)) {
500 struct net_device *dev = ch->netdev; 500 struct net_device *dev = ch->netdev;
501 struct ctcm_priv *priv = dev->ml_priv; 501 struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
600 int rc; 600 int rc;
601 601
602 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", 602 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
603 CTCM_FUNTAIL, ch->id, 603 CTCM_FUNTAIL, ch->id,
604 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); 604 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
605 605
606 if (ch->trans_skb != NULL) { 606 if (ch->trans_skb != NULL) {
607 clear_normalized_cda(&ch->ccw[1]); 607 clear_normalized_cda(&ch->ccw[1]);
608 dev_kfree_skb(ch->trans_skb); 608 dev_kfree_skb(ch->trans_skb);
609 ch->trans_skb = NULL; 609 ch->trans_skb = NULL;
610 } 610 }
611 if (CHANNEL_DIRECTION(ch->flags) == READ) { 611 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
612 ch->ccw[1].cmd_code = CCW_CMD_READ; 612 ch->ccw[1].cmd_code = CCW_CMD_READ;
613 ch->ccw[1].flags = CCW_FLAG_SLI; 613 ch->ccw[1].flags = CCW_FLAG_SLI;
614 ch->ccw[1].count = 0; 614 ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
622 "%s(%s): %s trans_skb alloc delayed " 622 "%s(%s): %s trans_skb alloc delayed "
623 "until first transfer", 623 "until first transfer",
624 CTCM_FUNTAIL, ch->id, 624 CTCM_FUNTAIL, ch->id,
625 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); 625 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
626 "RX" : "TX");
626 } 627 }
627 ch->ccw[0].cmd_code = CCW_CMD_PREPARE; 628 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
628 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 629 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
720 721
721 ch->th_seg = 0x00; 722 ch->th_seg = 0x00;
722 ch->th_seq_num = 0x00; 723 ch->th_seq_num = 0x00;
723 if (CHANNEL_DIRECTION(ch->flags) == READ) { 724 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
724 skb_queue_purge(&ch->io_queue); 725 skb_queue_purge(&ch->io_queue);
725 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); 726 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
726 } else { 727 } else {
@@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
799 fsm_newstate(fi, CTC_STATE_STARTRETRY); 800 fsm_newstate(fi, CTC_STATE_STARTRETRY);
800 fsm_deltimer(&ch->timer); 801 fsm_deltimer(&ch->timer);
801 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 802 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
802 if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) { 803 if (!IS_MPC(ch) &&
804 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
803 int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); 805 int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
804 if (rc != 0) 806 if (rc != 0)
805 ctcm_ccw_check_rc(ch, rc, 807 ctcm_ccw_check_rc(ch, rc,
@@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
811 CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, 813 CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
812 "%s(%s) : %s error during %s channel setup state=%s\n", 814 "%s(%s) : %s error during %s channel setup state=%s\n",
813 CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], 815 CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
814 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", 816 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
815 fsm_getstate_str(fi)); 817 fsm_getstate_str(fi));
816 818
817 if (CHANNEL_DIRECTION(ch->flags) == READ) { 819 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
818 fsm_newstate(fi, CTC_STATE_RXERR); 820 fsm_newstate(fi, CTC_STATE_RXERR);
819 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); 821 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
820 } else { 822 } else {
@@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
945 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); 947 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
946 948
947 fsm_newstate(fi, CTC_STATE_DTERM); 949 fsm_newstate(fi, CTC_STATE_DTERM);
948 ch2 = priv->channel[WRITE]; 950 ch2 = priv->channel[CTCM_WRITE];
949 fsm_newstate(ch2->fsm, CTC_STATE_DTERM); 951 fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
950 952
951 ccw_device_halt(ch->cdev, (unsigned long)ch); 953 ccw_device_halt(ch->cdev, (unsigned long)ch);
@@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1074 fsm_deltimer(&ch->timer); 1076 fsm_deltimer(&ch->timer);
1075 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, 1077 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1076 "%s: %s: %s unrecoverable channel error", 1078 "%s: %s: %s unrecoverable channel error",
1077 CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX"); 1079 CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1078 1080
1079 if (IS_MPC(ch)) { 1081 if (IS_MPC(ch)) {
1080 priv->stats.tx_dropped++; 1082 priv->stats.tx_dropped++;
1081 priv->stats.tx_errors++; 1083 priv->stats.tx_errors++;
1082 } 1084 }
1083 if (rd == READ) { 1085 if (rd == CTCM_READ) {
1084 fsm_newstate(fi, CTC_STATE_RXERR); 1086 fsm_newstate(fi, CTC_STATE_RXERR);
1085 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); 1087 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1086 } else { 1088 } else {
@@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1503 switch (fsm_getstate(fi)) { 1505 switch (fsm_getstate(fi)) {
1504 case CTC_STATE_STARTRETRY: 1506 case CTC_STATE_STARTRETRY:
1505 case CTC_STATE_SETUPWAIT: 1507 case CTC_STATE_SETUPWAIT:
1506 if (CHANNEL_DIRECTION(ch->flags) == READ) { 1508 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1507 ctcmpc_chx_rxidle(fi, event, arg); 1509 ctcmpc_chx_rxidle(fi, event, arg);
1508 } else { 1510 } else {
1509 fsm_newstate(fi, CTC_STATE_TXIDLE); 1511 fsm_newstate(fi, CTC_STATE_TXIDLE);
@@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1514 break; 1516 break;
1515 }; 1517 };
1516 1518
1517 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) 1519 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1518 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); 1520 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1519 1521
1520done: 1522done:
@@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1753 struct net_device *dev = ach->netdev; 1755 struct net_device *dev = ach->netdev;
1754 struct ctcm_priv *priv = dev->ml_priv; 1756 struct ctcm_priv *priv = dev->ml_priv;
1755 struct mpc_group *grp = priv->mpcg; 1757 struct mpc_group *grp = priv->mpcg;
1756 struct channel *wch = priv->channel[WRITE]; 1758 struct channel *wch = priv->channel[CTCM_WRITE];
1757 struct channel *rch = priv->channel[READ]; 1759 struct channel *rch = priv->channel[CTCM_READ];
1758 struct sk_buff *skb; 1760 struct sk_buff *skb;
1759 struct th_sweep *header; 1761 struct th_sweep *header;
1760 int rc = 0; 1762 int rc = 0;
@@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
2070 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); 2072 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2071 if (IS_MPC(priv)) 2073 if (IS_MPC(priv))
2072 priv->mpcg->channels_terminating = 0; 2074 priv->mpcg->channels_terminating = 0;
2073 for (direction = READ; direction <= WRITE; direction++) { 2075 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2074 struct channel *ch = priv->channel[direction]; 2076 struct channel *ch = priv->channel[direction];
2075 fsm_event(ch->fsm, CTC_EVENT_START, ch); 2077 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2076 } 2078 }
@@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2092 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2094 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2093 2095
2094 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); 2096 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2095 for (direction = READ; direction <= WRITE; direction++) { 2097 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2096 struct channel *ch = priv->channel[direction]; 2098 struct channel *ch = priv->channel[direction];
2097 fsm_event(ch->fsm, CTC_EVENT_STOP, ch); 2099 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2098 ch->th_seq_num = 0x00; 2100 ch->th_seq_num = 0x00;
@@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2183 2185
2184 if (IS_MPC(priv)) { 2186 if (IS_MPC(priv)) {
2185 if (event == DEV_EVENT_RXUP) 2187 if (event == DEV_EVENT_RXUP)
2186 mpc_channel_action(priv->channel[READ], 2188 mpc_channel_action(priv->channel[CTCM_READ],
2187 READ, MPC_CHANNEL_ADD); 2189 CTCM_READ, MPC_CHANNEL_ADD);
2188 else 2190 else
2189 mpc_channel_action(priv->channel[WRITE], 2191 mpc_channel_action(priv->channel[CTCM_WRITE],
2190 WRITE, MPC_CHANNEL_ADD); 2192 CTCM_WRITE, MPC_CHANNEL_ADD);
2191 } 2193 }
2192} 2194}
2193 2195
@@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2239 } 2241 }
2240 if (IS_MPC(priv)) { 2242 if (IS_MPC(priv)) {
2241 if (event == DEV_EVENT_RXDOWN) 2243 if (event == DEV_EVENT_RXDOWN)
2242 mpc_channel_action(priv->channel[READ], 2244 mpc_channel_action(priv->channel[CTCM_READ],
2243 READ, MPC_CHANNEL_REMOVE); 2245 CTCM_READ, MPC_CHANNEL_REMOVE);
2244 else 2246 else
2245 mpc_channel_action(priv->channel[WRITE], 2247 mpc_channel_action(priv->channel[CTCM_WRITE],
2246 WRITE, MPC_CHANNEL_REMOVE); 2248 CTCM_WRITE, MPC_CHANNEL_REMOVE);
2247 } 2249 }
2248} 2250}
2249 2251
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 4ecafbf91211..6edf20b62de5 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
267 else { 267 else {
268 ch->flags |= CHANNEL_FLAGS_INUSE; 268 ch->flags |= CHANNEL_FLAGS_INUSE;
269 ch->flags &= ~CHANNEL_FLAGS_RWMASK; 269 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
270 ch->flags |= (direction == WRITE) 270 ch->flags |= (direction == CTCM_WRITE)
271 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; 271 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
272 fsm_newstate(ch->fsm, CTC_STATE_STOPPED); 272 fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
273 } 273 }
@@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
388 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, 388 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
389 "%s(%s): %s trans_skb allocation error", 389 "%s(%s): %s trans_skb allocation error",
390 CTCM_FUNTAIL, ch->id, 390 CTCM_FUNTAIL, ch->id,
391 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); 391 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
392 "RX" : "TX");
392 return -ENOMEM; 393 return -ENOMEM;
393 } 394 }
394 395
@@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
399 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, 400 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
400 "%s(%s): %s set norm_cda failed", 401 "%s(%s): %s set norm_cda failed",
401 CTCM_FUNTAIL, ch->id, 402 CTCM_FUNTAIL, ch->id,
402 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); 403 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
404 "RX" : "TX");
403 return -ENOMEM; 405 return -ENOMEM;
404 } 406 }
405 407
@@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
603 605
604 priv = dev->ml_priv; 606 priv = dev->ml_priv;
605 grp = priv->mpcg; 607 grp = priv->mpcg;
606 ch = priv->channel[WRITE]; 608 ch = priv->channel[CTCM_WRITE];
607 609
608 /* sweep processing is not complete until response and request */ 610 /* sweep processing is not complete until response and request */
609 /* has completed for all read channels in group */ 611 /* has completed for all read channels in group */
610 if (grp->in_sweep == 0) { 612 if (grp->in_sweep == 0) {
611 grp->in_sweep = 1; 613 grp->in_sweep = 1;
612 grp->sweep_rsp_pend_num = grp->active_channels[READ]; 614 grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
613 grp->sweep_req_pend_num = grp->active_channels[READ]; 615 grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
614 } 616 }
615 617
616 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); 618 sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
911 return NETDEV_TX_BUSY; 913 return NETDEV_TX_BUSY;
912 914
913 dev->trans_start = jiffies; 915 dev->trans_start = jiffies;
914 if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) 916 if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
915 return NETDEV_TX_BUSY; 917 return NETDEV_TX_BUSY;
916 return NETDEV_TX_OK; 918 return NETDEV_TX_OK;
917} 919}
@@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
994 } 996 }
995 997
996 dev->trans_start = jiffies; 998 dev->trans_start = jiffies;
997 if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { 999 if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
998 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, 1000 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
999 "%s(%s): device error - dropped", 1001 "%s(%s): device error - dropped",
1000 CTCM_FUNTAIL, dev->name); 1002 CTCM_FUNTAIL, dev->name);
@@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
1035 return -EINVAL; 1037 return -EINVAL;
1036 1038
1037 priv = dev->ml_priv; 1039 priv = dev->ml_priv;
1038 max_bufsize = priv->channel[READ]->max_bufsize; 1040 max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
1039 1041
1040 if (IS_MPC(priv)) { 1042 if (IS_MPC(priv)) {
1041 if (new_mtu > max_bufsize - TH_HEADER_LENGTH) 1043 if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1226 priv = dev_get_drvdata(&cgdev->dev); 1228 priv = dev_get_drvdata(&cgdev->dev);
1227 1229
1228 /* Try to extract channel from driver data. */ 1230 /* Try to extract channel from driver data. */
1229 if (priv->channel[READ]->cdev == cdev) 1231 if (priv->channel[CTCM_READ]->cdev == cdev)
1230 ch = priv->channel[READ]; 1232 ch = priv->channel[CTCM_READ];
1231 else if (priv->channel[WRITE]->cdev == cdev) 1233 else if (priv->channel[CTCM_WRITE]->cdev == cdev)
1232 ch = priv->channel[WRITE]; 1234 ch = priv->channel[CTCM_WRITE];
1233 else { 1235 else {
1234 dev_err(&cdev->dev, 1236 dev_err(&cdev->dev,
1235 "%s: Internal error: Can't determine channel for " 1237 "%s: Internal error: Can't determine channel for "
@@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1587 goto out_ccw2; 1589 goto out_ccw2;
1588 } 1590 }
1589 1591
1590 for (direction = READ; direction <= WRITE; direction++) { 1592 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
1591 priv->channel[direction] = 1593 priv->channel[direction] =
1592 channel_get(type, direction == READ ? read_id : write_id, 1594 channel_get(type, direction == CTCM_READ ?
1593 direction); 1595 read_id : write_id, direction);
1594 if (priv->channel[direction] == NULL) { 1596 if (priv->channel[direction] == NULL) {
1595 if (direction == WRITE) 1597 if (direction == CTCM_WRITE)
1596 channel_free(priv->channel[READ]); 1598 channel_free(priv->channel[CTCM_READ]);
1597 goto out_dev; 1599 goto out_dev;
1598 } 1600 }
1599 priv->channel[direction]->netdev = dev; 1601 priv->channel[direction]->netdev = dev;
@@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1617 1619
1618 dev_info(&dev->dev, 1620 dev_info(&dev->dev,
1619 "setup OK : r/w = %s/%s, protocol : %d\n", 1621 "setup OK : r/w = %s/%s, protocol : %d\n",
1620 priv->channel[READ]->id, 1622 priv->channel[CTCM_READ]->id,
1621 priv->channel[WRITE]->id, priv->protocol); 1623 priv->channel[CTCM_WRITE]->id, priv->protocol);
1622 1624
1623 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1625 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1624 "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, 1626 "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
1625 priv->channel[READ]->id, 1627 priv->channel[CTCM_READ]->id,
1626 priv->channel[WRITE]->id, priv->protocol); 1628 priv->channel[CTCM_WRITE]->id, priv->protocol);
1627 1629
1628 return 0; 1630 return 0;
1629out_unregister: 1631out_unregister:
@@ -1635,10 +1637,10 @@ out_ccw2:
1635out_ccw1: 1637out_ccw1:
1636 ccw_device_set_offline(cgdev->cdev[0]); 1638 ccw_device_set_offline(cgdev->cdev[0]);
1637out_remove_channel2: 1639out_remove_channel2:
1638 readc = channel_get(type, read_id, READ); 1640 readc = channel_get(type, read_id, CTCM_READ);
1639 channel_remove(readc); 1641 channel_remove(readc);
1640out_remove_channel1: 1642out_remove_channel1:
1641 writec = channel_get(type, write_id, WRITE); 1643 writec = channel_get(type, write_id, CTCM_WRITE);
1642 channel_remove(writec); 1644 channel_remove(writec);
1643out_err_result: 1645out_err_result:
1644 return result; 1646 return result;
@@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1660 if (!priv) 1662 if (!priv)
1661 return -ENODEV; 1663 return -ENODEV;
1662 1664
1663 if (priv->channel[READ]) { 1665 if (priv->channel[CTCM_READ]) {
1664 dev = priv->channel[READ]->netdev; 1666 dev = priv->channel[CTCM_READ]->netdev;
1665 CTCM_DBF_DEV(SETUP, dev, ""); 1667 CTCM_DBF_DEV(SETUP, dev, "");
1666 /* Close the device */ 1668 /* Close the device */
1667 ctcm_close(dev); 1669 ctcm_close(dev);
1668 dev->flags &= ~IFF_RUNNING; 1670 dev->flags &= ~IFF_RUNNING;
1669 ctcm_remove_attributes(&cgdev->dev); 1671 ctcm_remove_attributes(&cgdev->dev);
1670 channel_free(priv->channel[READ]); 1672 channel_free(priv->channel[CTCM_READ]);
1671 } else 1673 } else
1672 dev = NULL; 1674 dev = NULL;
1673 1675
1674 if (priv->channel[WRITE]) 1676 if (priv->channel[CTCM_WRITE])
1675 channel_free(priv->channel[WRITE]); 1677 channel_free(priv->channel[CTCM_WRITE]);
1676 1678
1677 if (dev) { 1679 if (dev) {
1678 unregister_netdev(dev); 1680 unregister_netdev(dev);
@@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1685 ccw_device_set_offline(cgdev->cdev[1]); 1687 ccw_device_set_offline(cgdev->cdev[1]);
1686 ccw_device_set_offline(cgdev->cdev[0]); 1688 ccw_device_set_offline(cgdev->cdev[0]);
1687 1689
1688 if (priv->channel[READ]) 1690 if (priv->channel[CTCM_READ])
1689 channel_remove(priv->channel[READ]); 1691 channel_remove(priv->channel[CTCM_READ]);
1690 if (priv->channel[WRITE]) 1692 if (priv->channel[CTCM_WRITE])
1691 channel_remove(priv->channel[WRITE]); 1693 channel_remove(priv->channel[CTCM_WRITE]);
1692 priv->channel[READ] = priv->channel[WRITE] = NULL; 1694 priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
1693 1695
1694 return 0; 1696 return 0;
1695 1697
@@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1720 1722
1721 if (gdev->state == CCWGROUP_OFFLINE) 1723 if (gdev->state == CCWGROUP_OFFLINE)
1722 return 0; 1724 return 0;
1723 netif_device_detach(priv->channel[READ]->netdev); 1725 netif_device_detach(priv->channel[CTCM_READ]->netdev);
1724 ctcm_close(priv->channel[READ]->netdev); 1726 ctcm_close(priv->channel[CTCM_READ]->netdev);
1725 if (!wait_event_timeout(priv->fsm->wait_q, 1727 if (!wait_event_timeout(priv->fsm->wait_q,
1726 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { 1728 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1727 netif_device_attach(priv->channel[READ]->netdev); 1729 netif_device_attach(priv->channel[CTCM_READ]->netdev);
1728 return -EBUSY; 1730 return -EBUSY;
1729 } 1731 }
1730 ccw_device_set_offline(gdev->cdev[1]); 1732 ccw_device_set_offline(gdev->cdev[1]);
@@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
1745 rc = ccw_device_set_online(gdev->cdev[0]); 1747 rc = ccw_device_set_online(gdev->cdev[0]);
1746 if (rc) 1748 if (rc)
1747 goto err_out; 1749 goto err_out;
1748 ctcm_open(priv->channel[READ]->netdev); 1750 ctcm_open(priv->channel[CTCM_READ]->netdev);
1749err_out: 1751err_out:
1750 netif_device_attach(priv->channel[READ]->netdev); 1752 netif_device_attach(priv->channel[CTCM_READ]->netdev);
1751 return rc; 1753 return rc;
1752} 1754}
1753 1755
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d34fa14f44e7..24d5215eb0c4 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -111,8 +111,8 @@ enum ctcm_channel_types {
111 111
112#define CTCM_INITIAL_BLOCKLEN 2 112#define CTCM_INITIAL_BLOCKLEN 2
113 113
114#define READ 0 114#define CTCM_READ 0
115#define WRITE 1 115#define CTCM_WRITE 1
116 116
117#define CTCM_ID_SIZE 20+3 117#define CTCM_ID_SIZE 20+3
118 118
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 87c24d2936d6..2861e78773cb 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
419 return; 419 return;
420 priv = dev->ml_priv; 420 priv = dev->ml_priv;
421 grp = priv->mpcg; 421 grp = priv->mpcg;
422 rch = priv->channel[READ]; 422 rch = priv->channel[CTCM_READ];
423 wch = priv->channel[WRITE]; 423 wch = priv->channel[CTCM_WRITE];
424 424
425 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, 425 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
426 "%s(%s): state=%s", 426 "%s(%s): state=%s",
@@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
578 "%s: %s: flowc = %d", 578 "%s: %s: flowc = %d",
579 CTCM_FUNTAIL, dev->name, flowc); 579 CTCM_FUNTAIL, dev->name, flowc);
580 580
581 rch = priv->channel[READ]; 581 rch = priv->channel[CTCM_READ];
582 582
583 mpcg_state = fsm_getstate(grp->fsm); 583 mpcg_state = fsm_getstate(grp->fsm);
584 switch (flowc) { 584 switch (flowc) {
@@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
622 struct net_device *dev = rch->netdev; 622 struct net_device *dev = rch->netdev;
623 struct ctcm_priv *priv = dev->ml_priv; 623 struct ctcm_priv *priv = dev->ml_priv;
624 struct mpc_group *grp = priv->mpcg; 624 struct mpc_group *grp = priv->mpcg;
625 struct channel *ch = priv->channel[WRITE]; 625 struct channel *ch = priv->channel[CTCM_WRITE];
626 626
627 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); 627 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
628 CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); 628 CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
656 int rc = 0; 656 int rc = 0;
657 struct th_sweep *header; 657 struct th_sweep *header;
658 struct sk_buff *sweep_skb; 658 struct sk_buff *sweep_skb;
659 struct channel *ch = priv->channel[WRITE]; 659 struct channel *ch = priv->channel[CTCM_WRITE];
660 660
661 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); 661 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
662 662
@@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
712 struct net_device *dev = rch->netdev; 712 struct net_device *dev = rch->netdev;
713 struct ctcm_priv *priv = dev->ml_priv; 713 struct ctcm_priv *priv = dev->ml_priv;
714 struct mpc_group *grp = priv->mpcg; 714 struct mpc_group *grp = priv->mpcg;
715 struct channel *ch = priv->channel[WRITE]; 715 struct channel *ch = priv->channel[CTCM_WRITE];
716 716
717 if (do_debug) 717 if (do_debug)
718 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, 718 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
721 if (grp->in_sweep == 0) { 721 if (grp->in_sweep == 0) {
722 grp->in_sweep = 1; 722 grp->in_sweep = 1;
723 ctcm_test_and_set_busy(dev); 723 ctcm_test_and_set_busy(dev);
724 grp->sweep_req_pend_num = grp->active_channels[READ]; 724 grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
725 grp->sweep_rsp_pend_num = grp->active_channels[READ]; 725 grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
726 } 726 }
727 727
728 CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); 728 CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
906 fsm_newstate(grp->fsm, MPCG_STATE_READY); 906 fsm_newstate(grp->fsm, MPCG_STATE_READY);
907 907
908 /* Put up a read on the channel */ 908 /* Put up a read on the channel */
909 ch = priv->channel[READ]; 909 ch = priv->channel[CTCM_READ];
910 ch->pdu_seq = 0; 910 ch->pdu_seq = 0;
911 CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , 911 CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
912 __func__, ch->pdu_seq); 912 __func__, ch->pdu_seq);
913 913
914 ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); 914 ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
915 /* Put the write channel in idle state */ 915 /* Put the write channel in idle state */
916 ch = priv->channel[WRITE]; 916 ch = priv->channel[CTCM_WRITE];
917 if (ch->collect_len > 0) { 917 if (ch->collect_len > 0) {
918 spin_lock(&ch->collect_lock); 918 spin_lock(&ch->collect_lock);
919 ctcm_purge_skb_queue(&ch->collect_queue); 919 ctcm_purge_skb_queue(&ch->collect_queue);
@@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
960 "%s: %i / Grp:%s total_channels=%i, active_channels: " 960 "%s: %i / Grp:%s total_channels=%i, active_channels: "
961 "read=%i, write=%i\n", __func__, action, 961 "read=%i, write=%i\n", __func__, action,
962 fsm_getstate_str(grp->fsm), grp->num_channel_paths, 962 fsm_getstate_str(grp->fsm), grp->num_channel_paths,
963 grp->active_channels[READ], grp->active_channels[WRITE]); 963 grp->active_channels[CTCM_READ],
964 grp->active_channels[CTCM_WRITE]);
964 965
965 if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { 966 if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
966 grp->num_channel_paths++; 967 grp->num_channel_paths++;
@@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
994 grp->xid_skb->data, 995 grp->xid_skb->data,
995 grp->xid_skb->len); 996 grp->xid_skb->len);
996 997
997 ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ) 998 ch->xid->xid2_dlc_type =
999 ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
998 ? XID2_READ_SIDE : XID2_WRITE_SIDE); 1000 ? XID2_READ_SIDE : XID2_WRITE_SIDE);
999 1001
1000 if (CHANNEL_DIRECTION(ch->flags) == WRITE) 1002 if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
1001 ch->xid->xid2_buf_len = 0x00; 1003 ch->xid->xid2_buf_len = 0x00;
1002 1004
1003 ch->xid_skb->data = ch->xid_skb_data; 1005 ch->xid_skb->data = ch->xid_skb_data;
@@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
1006 1008
1007 fsm_newstate(ch->fsm, CH_XID0_PENDING); 1009 fsm_newstate(ch->fsm, CH_XID0_PENDING);
1008 1010
1009 if ((grp->active_channels[READ] > 0) && 1011 if ((grp->active_channels[CTCM_READ] > 0) &&
1010 (grp->active_channels[WRITE] > 0) && 1012 (grp->active_channels[CTCM_WRITE] > 0) &&
1011 (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { 1013 (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
1012 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); 1014 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1013 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, 1015 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
1027 if (grp->channels_terminating) 1029 if (grp->channels_terminating)
1028 goto done; 1030 goto done;
1029 1031
1030 if (((grp->active_channels[READ] == 0) && 1032 if (((grp->active_channels[CTCM_READ] == 0) &&
1031 (grp->active_channels[WRITE] > 0)) 1033 (grp->active_channels[CTCM_WRITE] > 0))
1032 || ((grp->active_channels[WRITE] == 0) && 1034 || ((grp->active_channels[CTCM_WRITE] == 0) &&
1033 (grp->active_channels[READ] > 0))) 1035 (grp->active_channels[CTCM_READ] > 0)))
1034 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); 1036 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1035 } 1037 }
1036done: 1038done:
@@ -1038,7 +1040,8 @@ done:
1038 "exit %s: %i / Grp:%s total_channels=%i, active_channels: " 1040 "exit %s: %i / Grp:%s total_channels=%i, active_channels: "
1039 "read=%i, write=%i\n", __func__, action, 1041 "read=%i, write=%i\n", __func__, action,
1040 fsm_getstate_str(grp->fsm), grp->num_channel_paths, 1042 fsm_getstate_str(grp->fsm), grp->num_channel_paths,
1041 grp->active_channels[READ], grp->active_channels[WRITE]); 1043 grp->active_channels[CTCM_READ],
1044 grp->active_channels[CTCM_WRITE]);
1042 1045
1043 CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); 1046 CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
1044} 1047}
@@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1392 (grp->port_persist == 0)) 1395 (grp->port_persist == 0))
1393 fsm_deltimer(&priv->restart_timer); 1396 fsm_deltimer(&priv->restart_timer);
1394 1397
1395 wch = priv->channel[WRITE]; 1398 wch = priv->channel[CTCM_WRITE];
1396 rch = priv->channel[READ]; 1399 rch = priv->channel[CTCM_READ];
1397 1400
1398 switch (grp->saved_state) { 1401 switch (grp->saved_state) {
1399 case MPCG_STATE_RESET: 1402 case MPCG_STATE_RESET:
@@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1480 1483
1481 priv = dev->ml_priv; 1484 priv = dev->ml_priv;
1482 grp = priv->mpcg; 1485 grp = priv->mpcg;
1483 wch = priv->channel[WRITE]; 1486 wch = priv->channel[CTCM_WRITE];
1484 rch = priv->channel[READ]; 1487 rch = priv->channel[CTCM_READ];
1485 1488
1486 switch (fsm_getstate(grp->fsm)) { 1489 switch (fsm_getstate(grp->fsm)) {
1487 case MPCG_STATE_XID2INITW: 1490 case MPCG_STATE_XID2INITW:
@@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
1586 CTCM_D3_DUMP((char *)xid, XID2_LENGTH); 1589 CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
1587 1590
1588 /*the received direction should be the opposite of ours */ 1591 /*the received direction should be the opposite of ours */
1589 if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : 1592 if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
1590 XID2_READ_SIDE) != xid->xid2_dlc_type) { 1593 XID2_READ_SIDE) != xid->xid2_dlc_type) {
1591 rc = 2; 1594 rc = 2;
1592 /* XID REJECTED: r/w channel pairing mismatch */ 1595 /* XID REJECTED: r/w channel pairing mismatch */
@@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
1912 if (grp == NULL) 1915 if (grp == NULL)
1913 return; 1916 return;
1914 1917
1915 for (direction = READ; direction <= WRITE; direction++) { 1918 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
1916 struct channel *ch = priv->channel[direction]; 1919 struct channel *ch = priv->channel[direction];
1917 struct xid2 *thisxid = ch->xid; 1920 struct xid2 *thisxid = ch->xid;
1918 ch->xid_skb->data = ch->xid_skb_data; 1921 ch->xid_skb->data = ch->xid_skb_data;
@@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
2152 return -ENOMEM; 2155 return -ENOMEM;
2153 } 2156 }
2154 2157
2155 *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; 2158 *((__u32 *)skb_push(skb, 4)) =
2156 priv->channel[READ]->pdu_seq++; 2159 priv->channel[CTCM_READ]->pdu_seq;
2160 priv->channel[CTCM_READ]->pdu_seq++;
2157 CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", 2161 CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
2158 __func__, priv->channel[READ]->pdu_seq); 2162 __func__, priv->channel[CTCM_READ]->pdu_seq);
2159 2163
2160 /* receipt of CC03 resets anticipated sequence number on 2164 /* receipt of CC03 resets anticipated sequence number on
2161 receiving side */ 2165 receiving side */
2162 priv->channel[READ]->pdu_seq = 0x00; 2166 priv->channel[CTCM_READ]->pdu_seq = 0x00;
2163 skb_reset_mac_header(skb); 2167 skb_reset_mac_header(skb);
2164 skb->dev = dev; 2168 skb->dev = dev;
2165 skb->protocol = htons(ETH_P_SNAP); 2169 skb->protocol = htons(ETH_P_SNAP);
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 2b24550e865e..8305319b2a84 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
38 int bs1; 38 int bs1;
39 struct ctcm_priv *priv = dev_get_drvdata(dev); 39 struct ctcm_priv *priv = dev_get_drvdata(dev);
40 40
41 if (!(priv && priv->channel[READ] && 41 ndev = priv->channel[CTCM_READ]->netdev;
42 (ndev = priv->channel[READ]->netdev))) { 42 if (!(priv && priv->channel[CTCM_READ] && ndev)) {
43 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); 43 CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
44 return -ENODEV; 44 return -ENODEV;
45 } 45 }
@@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
55 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) 55 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
56 goto einval; 56 goto einval;
57 57
58 priv->channel[READ]->max_bufsize = bs1; 58 priv->channel[CTCM_READ]->max_bufsize = bs1;
59 priv->channel[WRITE]->max_bufsize = bs1; 59 priv->channel[CTCM_WRITE]->max_bufsize = bs1;
60 if (!(ndev->flags & IFF_RUNNING)) 60 if (!(ndev->flags & IFF_RUNNING))
61 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; 61 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
62 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; 62 priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
63 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; 63 priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
64 64
65 CTCM_DBF_DEV(SETUP, ndev, buf); 65 CTCM_DBF_DEV(SETUP, ndev, buf);
66 return count; 66 return count;
@@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
85 p += sprintf(p, " Device FSM state: %s\n", 85 p += sprintf(p, " Device FSM state: %s\n",
86 fsm_getstate_str(priv->fsm)); 86 fsm_getstate_str(priv->fsm));
87 p += sprintf(p, " RX channel FSM state: %s\n", 87 p += sprintf(p, " RX channel FSM state: %s\n",
88 fsm_getstate_str(priv->channel[READ]->fsm)); 88 fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
89 p += sprintf(p, " TX channel FSM state: %s\n", 89 p += sprintf(p, " TX channel FSM state: %s\n",
90 fsm_getstate_str(priv->channel[WRITE]->fsm)); 90 fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
91 p += sprintf(p, " Max. TX buffer used: %ld\n", 91 p += sprintf(p, " Max. TX buffer used: %ld\n",
92 priv->channel[WRITE]->prof.maxmulti); 92 priv->channel[WRITE]->prof.maxmulti);
93 p += sprintf(p, " Max. chained SKBs: %ld\n", 93 p += sprintf(p, " Max. chained SKBs: %ld\n",
@@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
102 priv->channel[WRITE]->prof.tx_time); 102 priv->channel[WRITE]->prof.tx_time);
103 103
104 printk(KERN_INFO "Statistics for %s:\n%s", 104 printk(KERN_INFO "Statistics for %s:\n%s",
105 priv->channel[WRITE]->netdev->name, sbuf); 105 priv->channel[CTCM_WRITE]->netdev->name, sbuf);
106 kfree(sbuf); 106 kfree(sbuf);
107 return; 107 return;
108} 108}
@@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
125 return -ENODEV; 125 return -ENODEV;
126 /* Reset statistics */ 126 /* Reset statistics */
127 memset(&priv->channel[WRITE]->prof, 0, 127 memset(&priv->channel[WRITE]->prof, 0,
128 sizeof(priv->channel[WRITE]->prof)); 128 sizeof(priv->channel[CTCM_WRITE]->prof));
129 return count; 129 return count;
130} 130}
131 131
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index ffea35c63879..0d5eeadf6121 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -31,21 +31,20 @@ static struct afs_cell *afs_cell_root;
31 * allocate a cell record and fill in its name, VL server address list and 31 * allocate a cell record and fill in its name, VL server address list and
32 * allocate an anonymous key 32 * allocate an anonymous key
33 */ 33 */
34static struct afs_cell *afs_cell_alloc(const char *name, char *vllist) 34static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
35 char *vllist)
35{ 36{
36 struct afs_cell *cell; 37 struct afs_cell *cell;
37 struct key *key; 38 struct key *key;
38 size_t namelen;
39 char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next; 39 char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
40 char *dvllist = NULL, *_vllist = NULL; 40 char *dvllist = NULL, *_vllist = NULL;
41 char delimiter = ':'; 41 char delimiter = ':';
42 int ret; 42 int ret;
43 43
44 _enter("%s,%s", name, vllist); 44 _enter("%*.*s,%s", namelen, namelen, name ?: "", vllist);
45 45
46 BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */ 46 BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
47 47
48 namelen = strlen(name);
49 if (namelen > AFS_MAXCELLNAME) { 48 if (namelen > AFS_MAXCELLNAME) {
50 _leave(" = -ENAMETOOLONG"); 49 _leave(" = -ENAMETOOLONG");
51 return ERR_PTR(-ENAMETOOLONG); 50 return ERR_PTR(-ENAMETOOLONG);
@@ -73,6 +72,10 @@ static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
73 if (!vllist || strlen(vllist) < 7) { 72 if (!vllist || strlen(vllist) < 7) {
74 ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL); 73 ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL);
75 if (ret < 0) { 74 if (ret < 0) {
75 if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY)
76 /* translate these errors into something
77 * userspace might understand */
78 ret = -EDESTADDRREQ;
76 _leave(" = %d", ret); 79 _leave(" = %d", ret);
77 return ERR_PTR(ret); 80 return ERR_PTR(ret);
78 } 81 }
@@ -138,26 +141,29 @@ error:
138} 141}
139 142
140/* 143/*
141 * create a cell record 144 * afs_cell_crate() - create a cell record
142 * - "name" is the name of the cell 145 * @name: is the name of the cell.
143 * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format 146 * @namsesz: is the strlen of the cell name.
147 * @vllist: is a colon separated list of IP addresses in "a.b.c.d" format.
148 * @retref: is T to return the cell reference when the cell exists.
144 */ 149 */
145struct afs_cell *afs_cell_create(const char *name, char *vllist) 150struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
151 char *vllist, bool retref)
146{ 152{
147 struct afs_cell *cell; 153 struct afs_cell *cell;
148 int ret; 154 int ret;
149 155
150 _enter("%s,%s", name, vllist); 156 _enter("%*.*s,%s", namesz, namesz, name ?: "", vllist);
151 157
152 down_write(&afs_cells_sem); 158 down_write(&afs_cells_sem);
153 read_lock(&afs_cells_lock); 159 read_lock(&afs_cells_lock);
154 list_for_each_entry(cell, &afs_cells, link) { 160 list_for_each_entry(cell, &afs_cells, link) {
155 if (strcasecmp(cell->name, name) == 0) 161 if (strncasecmp(cell->name, name, namesz) == 0)
156 goto duplicate_name; 162 goto duplicate_name;
157 } 163 }
158 read_unlock(&afs_cells_lock); 164 read_unlock(&afs_cells_lock);
159 165
160 cell = afs_cell_alloc(name, vllist); 166 cell = afs_cell_alloc(name, namesz, vllist);
161 if (IS_ERR(cell)) { 167 if (IS_ERR(cell)) {
162 _leave(" = %ld", PTR_ERR(cell)); 168 _leave(" = %ld", PTR_ERR(cell));
163 up_write(&afs_cells_sem); 169 up_write(&afs_cells_sem);
@@ -197,8 +203,18 @@ error:
197 return ERR_PTR(ret); 203 return ERR_PTR(ret);
198 204
199duplicate_name: 205duplicate_name:
206 if (retref && !IS_ERR(cell))
207 afs_get_cell(cell);
208
200 read_unlock(&afs_cells_lock); 209 read_unlock(&afs_cells_lock);
201 up_write(&afs_cells_sem); 210 up_write(&afs_cells_sem);
211
212 if (retref) {
213 _leave(" = %p", cell);
214 return cell;
215 }
216
217 _leave(" = -EEXIST");
202 return ERR_PTR(-EEXIST); 218 return ERR_PTR(-EEXIST);
203} 219}
204 220
@@ -229,7 +245,7 @@ int afs_cell_init(char *rootcell)
229 *cp++ = 0; 245 *cp++ = 0;
230 246
231 /* allocate a cell record for the root cell */ 247 /* allocate a cell record for the root cell */
232 new_root = afs_cell_create(rootcell, cp); 248 new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false);
233 if (IS_ERR(new_root)) { 249 if (IS_ERR(new_root)) {
234 _leave(" = %ld", PTR_ERR(new_root)); 250 _leave(" = %ld", PTR_ERR(new_root));
235 return PTR_ERR(new_root); 251 return PTR_ERR(new_root);
@@ -249,11 +265,12 @@ int afs_cell_init(char *rootcell)
249/* 265/*
250 * lookup a cell record 266 * lookup a cell record
251 */ 267 */
252struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz) 268struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
269 bool dns_cell)
253{ 270{
254 struct afs_cell *cell; 271 struct afs_cell *cell;
255 272
256 _enter("\"%*.*s\",", namesz, namesz, name ? name : ""); 273 _enter("\"%*.*s\",", namesz, namesz, name ?: "");
257 274
258 down_read(&afs_cells_sem); 275 down_read(&afs_cells_sem);
259 read_lock(&afs_cells_lock); 276 read_lock(&afs_cells_lock);
@@ -267,6 +284,8 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
267 } 284 }
268 } 285 }
269 cell = ERR_PTR(-ENOENT); 286 cell = ERR_PTR(-ENOENT);
287 if (dns_cell)
288 goto create_cell;
270 found: 289 found:
271 ; 290 ;
272 } else { 291 } else {
@@ -289,6 +308,15 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
289 up_read(&afs_cells_sem); 308 up_read(&afs_cells_sem);
290 _leave(" = %p", cell); 309 _leave(" = %p", cell);
291 return cell; 310 return cell;
311
312create_cell:
313 read_unlock(&afs_cells_lock);
314 up_read(&afs_cells_sem);
315
316 cell = afs_cell_create(name, namesz, NULL, true);
317
318 _leave(" = %p", cell);
319 return cell;
292} 320}
293 321
294#if 0 322#if 0
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index b42d5cc1d6d2..0d38c09bd55e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -477,6 +477,40 @@ static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
477} 477}
478 478
479/* 479/*
480 * Try to auto mount the mountpoint with pseudo directory, if the autocell
481 * operation is setted.
482 */
483static struct inode *afs_try_auto_mntpt(
484 int ret, struct dentry *dentry, struct inode *dir, struct key *key,
485 struct afs_fid *fid)
486{
487 const char *devname = dentry->d_name.name;
488 struct afs_vnode *vnode = AFS_FS_I(dir);
489 struct inode *inode;
490
491 _enter("%d, %p{%s}, {%x:%u}, %p",
492 ret, dentry, devname, vnode->fid.vid, vnode->fid.vnode, key);
493
494 if (ret != -ENOENT ||
495 !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
496 goto out;
497
498 inode = afs_iget_autocell(dir, devname, strlen(devname), key);
499 if (IS_ERR(inode)) {
500 ret = PTR_ERR(inode);
501 goto out;
502 }
503
504 *fid = AFS_FS_I(inode)->fid;
505 _leave("= %p", inode);
506 return inode;
507
508out:
509 _leave("= %d", ret);
510 return ERR_PTR(ret);
511}
512
513/*
480 * look up an entry in a directory 514 * look up an entry in a directory
481 */ 515 */
482static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, 516static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
@@ -520,6 +554,13 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
520 554
521 ret = afs_do_lookup(dir, dentry, &fid, key); 555 ret = afs_do_lookup(dir, dentry, &fid, key);
522 if (ret < 0) { 556 if (ret < 0) {
557 inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
558 if (!IS_ERR(inode)) {
559 key_put(key);
560 goto success;
561 }
562
563 ret = PTR_ERR(inode);
523 key_put(key); 564 key_put(key);
524 if (ret == -ENOENT) { 565 if (ret == -ENOENT) {
525 d_add(dentry, NULL); 566 d_add(dentry, NULL);
@@ -539,6 +580,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
539 return ERR_CAST(inode); 580 return ERR_CAST(inode);
540 } 581 }
541 582
583success:
542 dentry->d_op = &afs_fs_dentry_operations; 584 dentry->d_op = &afs_fs_dentry_operations;
543 585
544 d_add(dentry, inode); 586 d_add(dentry, inode);
@@ -696,8 +738,9 @@ static int afs_d_delete(struct dentry *dentry)
696 goto zap; 738 goto zap;
697 739
698 if (dentry->d_inode && 740 if (dentry->d_inode &&
699 test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags)) 741 (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) ||
700 goto zap; 742 test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags)))
743 goto zap;
701 744
702 _leave(" = 0 [keep]"); 745 _leave(" = 0 [keep]");
703 return 0; 746 return 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 320ffef11574..0747339011c3 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -19,6 +19,8 @@
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/mount.h>
23#include <linux/namei.h>
22#include "internal.h" 24#include "internal.h"
23 25
24struct afs_iget_data { 26struct afs_iget_data {
@@ -102,6 +104,16 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
102} 104}
103 105
104/* 106/*
107 * iget5() comparator for inode created by autocell operations
108 *
109 * These pseudo inodes don't match anything.
110 */
111static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
112{
113 return 0;
114}
115
116/*
105 * iget5() inode initialiser 117 * iget5() inode initialiser
106 */ 118 */
107static int afs_iget5_set(struct inode *inode, void *opaque) 119static int afs_iget5_set(struct inode *inode, void *opaque)
@@ -118,6 +130,67 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
118} 130}
119 131
120/* 132/*
133 * inode retrieval for autocell
134 */
135struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
136 int namesz, struct key *key)
137{
138 struct afs_iget_data data;
139 struct afs_super_info *as;
140 struct afs_vnode *vnode;
141 struct super_block *sb;
142 struct inode *inode;
143 static atomic_t afs_autocell_ino;
144
145 _enter("{%x:%u},%*.*s,",
146 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
147 namesz, namesz, dev_name ?: "");
148
149 sb = dir->i_sb;
150 as = sb->s_fs_info;
151 data.volume = as->volume;
152 data.fid.vid = as->volume->vid;
153 data.fid.unique = 0;
154 data.fid.vnode = 0;
155
156 inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
157 afs_iget5_autocell_test, afs_iget5_set,
158 &data);
159 if (!inode) {
160 _leave(" = -ENOMEM");
161 return ERR_PTR(-ENOMEM);
162 }
163
164 _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
165 inode, inode->i_ino, data.fid.vid, data.fid.vnode,
166 data.fid.unique);
167
168 vnode = AFS_FS_I(inode);
169
170 /* there shouldn't be an existing inode */
171 BUG_ON(!(inode->i_state & I_NEW));
172
173 inode->i_size = 0;
174 inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
175 inode->i_op = &afs_autocell_inode_operations;
176 inode->i_nlink = 2;
177 inode->i_uid = 0;
178 inode->i_gid = 0;
179 inode->i_ctime.tv_sec = get_seconds();
180 inode->i_ctime.tv_nsec = 0;
181 inode->i_atime = inode->i_mtime = inode->i_ctime;
182 inode->i_blocks = 0;
183 inode->i_version = 0;
184 inode->i_generation = 0;
185
186 set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
187 inode->i_flags |= S_NOATIME;
188 unlock_new_inode(inode);
189 _leave(" = %p", inode);
190 return inode;
191}
192
193/*
121 * inode retrieval 194 * inode retrieval
122 */ 195 */
123struct inode *afs_iget(struct super_block *sb, struct key *key, 196struct inode *afs_iget(struct super_block *sb, struct key *key,
@@ -314,6 +387,19 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
314} 387}
315 388
316/* 389/*
390 * discard an AFS inode
391 */
392int afs_drop_inode(struct inode *inode)
393{
394 _enter("");
395
396 if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
397 return generic_delete_inode(inode);
398 else
399 return generic_drop_inode(inode);
400}
401
402/*
317 * clear an AFS inode 403 * clear an AFS inode
318 */ 404 */
319void afs_evict_inode(struct inode *inode) 405void afs_evict_inode(struct inode *inode)
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c6c93f180707..cca8eef736fc 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -42,6 +42,7 @@ typedef enum {
42struct afs_mount_params { 42struct afs_mount_params {
43 bool rwpath; /* T if the parent should be considered R/W */ 43 bool rwpath; /* T if the parent should be considered R/W */
44 bool force; /* T to force cell type */ 44 bool force; /* T to force cell type */
45 bool autocell; /* T if set auto mount operation */
45 afs_voltype_t type; /* type of volume requested */ 46 afs_voltype_t type; /* type of volume requested */
46 int volnamesz; /* size of volume name */ 47 int volnamesz; /* size of volume name */
47 const char *volname; /* name of volume to mount */ 48 const char *volname; /* name of volume to mount */
@@ -358,6 +359,8 @@ struct afs_vnode {
358#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */ 359#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
359#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */ 360#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
360#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */ 361#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
362#define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
363#define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
361 364
362 long acl_order; /* ACL check count (callback break count) */ 365 long acl_order; /* ACL check count (callback break count) */
363 366
@@ -468,8 +471,8 @@ extern struct list_head afs_proc_cells;
468 471
469#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0) 472#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
470extern int afs_cell_init(char *); 473extern int afs_cell_init(char *);
471extern struct afs_cell *afs_cell_create(const char *, char *); 474extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
472extern struct afs_cell *afs_cell_lookup(const char *, unsigned); 475extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
473extern struct afs_cell *afs_grab_cell(struct afs_cell *); 476extern struct afs_cell *afs_grab_cell(struct afs_cell *);
474extern void afs_put_cell(struct afs_cell *); 477extern void afs_put_cell(struct afs_cell *);
475extern void afs_cell_purge(void); 478extern void afs_cell_purge(void);
@@ -558,6 +561,8 @@ extern int afs_fs_release_lock(struct afs_server *, struct key *,
558/* 561/*
559 * inode.c 562 * inode.c
560 */ 563 */
564extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
565 struct key *);
561extern struct inode *afs_iget(struct super_block *, struct key *, 566extern struct inode *afs_iget(struct super_block *, struct key *,
562 struct afs_fid *, struct afs_file_status *, 567 struct afs_fid *, struct afs_file_status *,
563 struct afs_callback *); 568 struct afs_callback *);
@@ -566,6 +571,7 @@ extern int afs_validate(struct afs_vnode *, struct key *);
566extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 571extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
567extern int afs_setattr(struct dentry *, struct iattr *); 572extern int afs_setattr(struct dentry *, struct iattr *);
568extern void afs_evict_inode(struct inode *); 573extern void afs_evict_inode(struct inode *);
574extern int afs_drop_inode(struct inode *);
569 575
570/* 576/*
571 * main.c 577 * main.c
@@ -581,6 +587,7 @@ extern int afs_abort_to_error(u32);
581 * mntpt.c 587 * mntpt.c
582 */ 588 */
583extern const struct inode_operations afs_mntpt_inode_operations; 589extern const struct inode_operations afs_mntpt_inode_operations;
590extern const struct inode_operations afs_autocell_inode_operations;
584extern const struct file_operations afs_mntpt_file_operations; 591extern const struct file_operations afs_mntpt_file_operations;
585 592
586extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); 593extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a9e23039ea34..6d552686c498 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -38,6 +38,11 @@ const struct inode_operations afs_mntpt_inode_operations = {
38 .getattr = afs_getattr, 38 .getattr = afs_getattr,
39}; 39};
40 40
41const struct inode_operations afs_autocell_inode_operations = {
42 .follow_link = afs_mntpt_follow_link,
43 .getattr = afs_getattr,
44};
45
41static LIST_HEAD(afs_vfsmounts); 46static LIST_HEAD(afs_vfsmounts);
42static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out); 47static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
43 48
@@ -136,20 +141,16 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
136{ 141{
137 struct afs_super_info *super; 142 struct afs_super_info *super;
138 struct vfsmount *mnt; 143 struct vfsmount *mnt;
144 struct afs_vnode *vnode;
139 struct page *page; 145 struct page *page;
140 size_t size; 146 char *devname, *options;
141 char *buf, *devname, *options; 147 bool rwpath = false;
142 int ret; 148 int ret;
143 149
144 _enter("{%s}", mntpt->d_name.name); 150 _enter("{%s}", mntpt->d_name.name);
145 151
146 BUG_ON(!mntpt->d_inode); 152 BUG_ON(!mntpt->d_inode);
147 153
148 ret = -EINVAL;
149 size = mntpt->d_inode->i_size;
150 if (size > PAGE_SIZE - 1)
151 goto error_no_devname;
152
153 ret = -ENOMEM; 154 ret = -ENOMEM;
154 devname = (char *) get_zeroed_page(GFP_KERNEL); 155 devname = (char *) get_zeroed_page(GFP_KERNEL);
155 if (!devname) 156 if (!devname)
@@ -159,28 +160,59 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
159 if (!options) 160 if (!options)
160 goto error_no_options; 161 goto error_no_options;
161 162
162 /* read the contents of the AFS special symlink */ 163 vnode = AFS_FS_I(mntpt->d_inode);
163 page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); 164 if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
164 if (IS_ERR(page)) { 165 /* if the directory is a pseudo directory, use the d_name */
165 ret = PTR_ERR(page); 166 static const char afs_root_cell[] = ":root.cell.";
166 goto error_no_page; 167 unsigned size = mntpt->d_name.len;
168
169 ret = -ENOENT;
170 if (size < 2 || size > AFS_MAXCELLNAME)
171 goto error_no_page;
172
173 if (mntpt->d_name.name[0] == '.') {
174 devname[0] = '#';
175 memcpy(devname + 1, mntpt->d_name.name, size - 1);
176 memcpy(devname + size, afs_root_cell,
177 sizeof(afs_root_cell));
178 rwpath = true;
179 } else {
180 devname[0] = '%';
181 memcpy(devname + 1, mntpt->d_name.name, size);
182 memcpy(devname + size + 1, afs_root_cell,
183 sizeof(afs_root_cell));
184 }
185 } else {
186 /* read the contents of the AFS special symlink */
187 loff_t size = i_size_read(mntpt->d_inode);
188 char *buf;
189
190 ret = -EINVAL;
191 if (size > PAGE_SIZE - 1)
192 goto error_no_page;
193
194 page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
195 if (IS_ERR(page)) {
196 ret = PTR_ERR(page);
197 goto error_no_page;
198 }
199
200 ret = -EIO;
201 if (PageError(page))
202 goto error;
203
204 buf = kmap_atomic(page, KM_USER0);
205 memcpy(devname, buf, size);
206 kunmap_atomic(buf, KM_USER0);
207 page_cache_release(page);
208 page = NULL;
167 } 209 }
168 210
169 ret = -EIO;
170 if (PageError(page))
171 goto error;
172
173 buf = kmap_atomic(page, KM_USER0);
174 memcpy(devname, buf, size);
175 kunmap_atomic(buf, KM_USER0);
176 page_cache_release(page);
177 page = NULL;
178
179 /* work out what options we want */ 211 /* work out what options we want */
180 super = AFS_FS_S(mntpt->d_sb); 212 super = AFS_FS_S(mntpt->d_sb);
181 memcpy(options, "cell=", 5); 213 memcpy(options, "cell=", 5);
182 strcpy(options + 5, super->volume->cell->name); 214 strcpy(options + 5, super->volume->cell->name);
183 if (super->volume->type == AFSVL_RWVOL) 215 if (super->volume->type == AFSVL_RWVOL || rwpath)
184 strcat(options, ",rwpath"); 216 strcat(options, ",rwpath");
185 217
186 /* try and do the mount */ 218 /* try and do the mount */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 852739d262a9..096b23f821a1 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -294,7 +294,7 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
294 if (strcmp(kbuf, "add") == 0) { 294 if (strcmp(kbuf, "add") == 0) {
295 struct afs_cell *cell; 295 struct afs_cell *cell;
296 296
297 cell = afs_cell_create(name, args); 297 cell = afs_cell_create(name, strlen(name), args, false);
298 if (IS_ERR(cell)) { 298 if (IS_ERR(cell)) {
299 ret = PTR_ERR(cell); 299 ret = PTR_ERR(cell);
300 goto done; 300 goto done;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 9cf80f02da16..77e1e5a61154 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mount.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
@@ -48,6 +49,7 @@ struct file_system_type afs_fs_type = {
48static const struct super_operations afs_super_ops = { 49static const struct super_operations afs_super_ops = {
49 .statfs = afs_statfs, 50 .statfs = afs_statfs,
50 .alloc_inode = afs_alloc_inode, 51 .alloc_inode = afs_alloc_inode,
52 .drop_inode = afs_drop_inode,
51 .destroy_inode = afs_destroy_inode, 53 .destroy_inode = afs_destroy_inode,
52 .evict_inode = afs_evict_inode, 54 .evict_inode = afs_evict_inode,
53 .put_super = afs_put_super, 55 .put_super = afs_put_super,
@@ -62,12 +64,14 @@ enum {
62 afs_opt_cell, 64 afs_opt_cell,
63 afs_opt_rwpath, 65 afs_opt_rwpath,
64 afs_opt_vol, 66 afs_opt_vol,
67 afs_opt_autocell,
65}; 68};
66 69
67static const match_table_t afs_options_list = { 70static const match_table_t afs_options_list = {
68 { afs_opt_cell, "cell=%s" }, 71 { afs_opt_cell, "cell=%s" },
69 { afs_opt_rwpath, "rwpath" }, 72 { afs_opt_rwpath, "rwpath" },
70 { afs_opt_vol, "vol=%s" }, 73 { afs_opt_vol, "vol=%s" },
74 { afs_opt_autocell, "autocell" },
71 { afs_no_opt, NULL }, 75 { afs_no_opt, NULL },
72}; 76};
73 77
@@ -151,7 +155,8 @@ static int afs_parse_options(struct afs_mount_params *params,
151 switch (token) { 155 switch (token) {
152 case afs_opt_cell: 156 case afs_opt_cell:
153 cell = afs_cell_lookup(args[0].from, 157 cell = afs_cell_lookup(args[0].from,
154 args[0].to - args[0].from); 158 args[0].to - args[0].from,
159 false);
155 if (IS_ERR(cell)) 160 if (IS_ERR(cell))
156 return PTR_ERR(cell); 161 return PTR_ERR(cell);
157 afs_put_cell(params->cell); 162 afs_put_cell(params->cell);
@@ -166,6 +171,10 @@ static int afs_parse_options(struct afs_mount_params *params,
166 *devname = args[0].from; 171 *devname = args[0].from;
167 break; 172 break;
168 173
174 case afs_opt_autocell:
175 params->autocell = 1;
176 break;
177
169 default: 178 default:
170 printk(KERN_ERR "kAFS:" 179 printk(KERN_ERR "kAFS:"
171 " Unknown or invalid mount option: '%s'\n", p); 180 " Unknown or invalid mount option: '%s'\n", p);
@@ -252,10 +261,10 @@ static int afs_parse_device_name(struct afs_mount_params *params,
252 261
253 /* lookup the cell record */ 262 /* lookup the cell record */
254 if (cellname || !params->cell) { 263 if (cellname || !params->cell) {
255 cell = afs_cell_lookup(cellname, cellnamesz); 264 cell = afs_cell_lookup(cellname, cellnamesz, true);
256 if (IS_ERR(cell)) { 265 if (IS_ERR(cell)) {
257 printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n", 266 printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
258 cellname ?: ""); 267 cellnamesz, cellnamesz, cellname ?: "");
259 return PTR_ERR(cell); 268 return PTR_ERR(cell);
260 } 269 }
261 afs_put_cell(params->cell); 270 afs_put_cell(params->cell);
@@ -321,6 +330,9 @@ static int afs_fill_super(struct super_block *sb, void *data)
321 if (IS_ERR(inode)) 330 if (IS_ERR(inode))
322 goto error_inode; 331 goto error_inode;
323 332
333 if (params->autocell)
334 set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
335
324 ret = -ENOMEM; 336 ret = -ENOMEM;
325 root = d_alloc_root(inode); 337 root = d_alloc_root(inode);
326 if (!root) 338 if (!root)
diff --git a/fs/cifs/README b/fs/cifs/README
index a7081eeeb85d..7099a526f775 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -301,6 +301,16 @@ A partial list of the supported mount options follows:
301 gid Set the default gid for inodes (similar to above). 301 gid Set the default gid for inodes (similar to above).
302 file_mode If CIFS Unix extensions are not supported by the server 302 file_mode If CIFS Unix extensions are not supported by the server
303 this overrides the default mode for file inodes. 303 this overrides the default mode for file inodes.
304 fsc Enable local disk caching using FS-Cache (off by default). This
305 option could be useful to improve performance on a slow link,
306 heavily loaded server and/or network where reading from the
307 disk is faster than reading from the server (over the network).
308 This could also impact scalability positively as the
309 number of calls to the server are reduced. However, local
310 caching is not suitable for all workloads for e.g. read-once
311 type workloads. So, you need to consider carefully your
312 workload/scenario before using this option. Currently, local
313 disk caching is functional for CIFS files opened as read-only.
304 dir_mode If CIFS Unix extensions are not supported by the server 314 dir_mode If CIFS Unix extensions are not supported by the server
305 this overrides the default mode for directory inodes. 315 this overrides the default mode for directory inodes.
306 port attempt to contact the server on this tcp port, before 316 port attempt to contact the server on this tcp port, before
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index cc1bb33b59b8..26a510a7be09 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -100,3 +100,20 @@ config NFS_FSCACHE
100 help 100 help
101 Say Y here if you want NFS data to be cached locally on disc through 101 Say Y here if you want NFS data to be cached locally on disc through
102 the general filesystem cache manager 102 the general filesystem cache manager
103
104config NFS_USE_LEGACY_DNS
105 bool "Use the legacy NFS DNS resolver"
106 depends on NFS_V4
107 help
108 The kernel now provides a method for translating a host name into an
109 IP address. Select Y here if you would rather use your own DNS
110 resolver script.
111
112 If unsure, say N
113
114config NFS_USE_KERNEL_DNS
115 bool
116 depends on NFS_V4 && !NFS_USE_LEGACY_DNS
117 select DNS_RESOLVER
118 select KEYS
119 default y
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 76fd235d0024..dba50a5625db 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -6,6 +6,29 @@
6 * Resolves DNS hostnames into valid ip addresses 6 * Resolves DNS hostnames into valid ip addresses
7 */ 7 */
8 8
9#ifdef CONFIG_NFS_USE_KERNEL_DNS
10
11#include <linux/sunrpc/clnt.h>
12#include <linux/dns_resolver.h>
13
14ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
15 struct sockaddr *sa, size_t salen)
16{
17 ssize_t ret;
18 char *ip_addr = NULL;
19 int ip_len;
20
21 ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
22 if (ip_len > 0)
23 ret = rpc_pton(ip_addr, ip_len, sa, salen);
24 else
25 ret = -ESRCH;
26 kfree(ip_addr);
27 return ret;
28}
29
30#else
31
9#include <linux/hash.h> 32#include <linux/hash.h>
10#include <linux/string.h> 33#include <linux/string.h>
11#include <linux/kmod.h> 34#include <linux/kmod.h>
@@ -346,3 +369,4 @@ void nfs_dns_resolver_destroy(void)
346 nfs_cache_unregister(&nfs_dns_resolve); 369 nfs_cache_unregister(&nfs_dns_resolve);
347} 370}
348 371
372#endif
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
index a3f0938babf7..199bb5543a91 100644
--- a/fs/nfs/dns_resolve.h
+++ b/fs/nfs/dns_resolve.h
@@ -6,8 +6,20 @@
6 6
7#define NFS_DNS_HOSTNAME_MAXLEN (128) 7#define NFS_DNS_HOSTNAME_MAXLEN (128)
8 8
9
10#ifdef CONFIG_NFS_USE_KERNEL_DNS
11static inline int nfs_dns_resolver_init(void)
12{
13 return 0;
14}
15
16static inline void nfs_dns_resolver_destroy(void)
17{}
18#else
9extern int nfs_dns_resolver_init(void); 19extern int nfs_dns_resolver_init(void);
10extern void nfs_dns_resolver_destroy(void); 20extern void nfs_dns_resolver_destroy(void);
21#endif
22
11extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen, 23extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
12 struct sockaddr *sa, size_t salen); 24 struct sockaddr *sa, size_t salen);
13 25
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index da702294d7e7..a76e0aa5cd3f 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle,
290 290
291int ocfs2_check_acl(struct inode *inode, int mask) 291int ocfs2_check_acl(struct inode *inode, int mask)
292{ 292{
293 struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS); 293 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
294 struct buffer_head *di_bh = NULL;
295 struct posix_acl *acl;
296 int ret = -EAGAIN;
294 297
295 if (IS_ERR(acl)) 298 if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
299 return ret;
300
301 ret = ocfs2_read_inode_block(inode, &di_bh);
302 if (ret < 0) {
303 mlog_errno(ret);
304 return ret;
305 }
306
307 acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
308
309 brelse(di_bh);
310
311 if (IS_ERR(acl)) {
312 mlog_errno(PTR_ERR(acl));
296 return PTR_ERR(acl); 313 return PTR_ERR(acl);
314 }
297 if (acl) { 315 if (acl) {
298 int ret = posix_acl_permission(inode, acl, mask); 316 ret = posix_acl_permission(inode, acl, mask);
299 posix_acl_release(acl); 317 posix_acl_release(acl);
300 return ret; 318 return ret;
301 } 319 }
@@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle,
344{ 362{
345 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 363 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
346 struct posix_acl *acl = NULL; 364 struct posix_acl *acl = NULL;
347 int ret = 0; 365 int ret = 0, ret2;
348 mode_t mode; 366 mode_t mode;
349 367
350 if (!S_ISLNK(inode->i_mode)) { 368 if (!S_ISLNK(inode->i_mode)) {
@@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle,
381 mode = inode->i_mode; 399 mode = inode->i_mode;
382 ret = posix_acl_create_masq(clone, &mode); 400 ret = posix_acl_create_masq(clone, &mode);
383 if (ret >= 0) { 401 if (ret >= 0) {
384 ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); 402 ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
403 if (ret2) {
404 mlog_errno(ret2);
405 ret = ret2;
406 goto cleanup;
407 }
385 if (ret > 0) { 408 if (ret > 0) {
386 ret = ocfs2_set_acl(handle, inode, 409 ret = ocfs2_set_acl(handle, inode,
387 di_bh, ACL_TYPE_ACCESS, 410 di_bh, ACL_TYPE_ACCESS,
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index aa75ca3f78da..1361997cf205 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1759,6 +1759,7 @@ static int o2net_accept_one(struct socket *sock)
1759 struct sockaddr_in sin; 1759 struct sockaddr_in sin;
1760 struct socket *new_sock = NULL; 1760 struct socket *new_sock = NULL;
1761 struct o2nm_node *node = NULL; 1761 struct o2nm_node *node = NULL;
1762 struct o2nm_node *local_node = NULL;
1762 struct o2net_sock_container *sc = NULL; 1763 struct o2net_sock_container *sc = NULL;
1763 struct o2net_node *nn; 1764 struct o2net_node *nn;
1764 1765
@@ -1796,11 +1797,15 @@ static int o2net_accept_one(struct socket *sock)
1796 goto out; 1797 goto out;
1797 } 1798 }
1798 1799
1799 if (o2nm_this_node() > node->nd_num) { 1800 if (o2nm_this_node() >= node->nd_num) {
1800 mlog(ML_NOTICE, "unexpected connect attempted from a lower " 1801 local_node = o2nm_get_node_by_num(o2nm_this_node());
1801 "numbered node '%s' at " "%pI4:%d with num %u\n", 1802 mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
1802 node->nd_name, &sin.sin_addr.s_addr, 1803 "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
1803 ntohs(sin.sin_port), node->nd_num); 1804 local_node->nd_name, local_node->nd_num,
1805 &(local_node->nd_ipv4_address),
1806 ntohs(local_node->nd_ipv4_port),
1807 node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
1808 ntohs(sin.sin_port));
1804 ret = -EINVAL; 1809 ret = -EINVAL;
1805 goto out; 1810 goto out;
1806 } 1811 }
@@ -1857,6 +1862,8 @@ out:
1857 sock_release(new_sock); 1862 sock_release(new_sock);
1858 if (node) 1863 if (node)
1859 o2nm_node_put(node); 1864 o2nm_node_put(node);
1865 if (local_node)
1866 o2nm_node_put(local_node);
1860 if (sc) 1867 if (sc)
1861 sc_put(sc); 1868 sc_put(sc);
1862 return ret; 1869 return ret;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 94b97fc6a88e..ffb4c68dafa4 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref)
511 511
512 atomic_dec(&dlm->res_cur_count); 512 atomic_dec(&dlm->res_cur_count);
513 513
514 dlm_put(dlm);
515
516 if (!hlist_unhashed(&res->hash_node) || 514 if (!hlist_unhashed(&res->hash_node) ||
517 !list_empty(&res->granted) || 515 !list_empty(&res->granted) ||
518 !list_empty(&res->converting) || 516 !list_empty(&res->converting) ||
@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
585 res->migration_pending = 0; 583 res->migration_pending = 0;
586 res->inflight_locks = 0; 584 res->inflight_locks = 0;
587 585
588 /* put in dlm_lockres_release */
589 dlm_grab(dlm);
590 res->dlm = dlm; 586 res->dlm = dlm;
591 587
592 kref_init(&res->refs); 588 kref_init(&res->refs);
@@ -3050,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3050 /* check for pre-existing lock */ 3046 /* check for pre-existing lock */
3051 spin_lock(&dlm->spinlock); 3047 spin_lock(&dlm->spinlock);
3052 res = __dlm_lookup_lockres(dlm, name, namelen, hash); 3048 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3053 spin_lock(&dlm->master_lock);
3054
3055 if (res) { 3049 if (res) {
3056 spin_lock(&res->spinlock); 3050 spin_lock(&res->spinlock);
3057 if (res->state & DLM_LOCK_RES_RECOVERING) { 3051 if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -3069,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3069 spin_unlock(&res->spinlock); 3063 spin_unlock(&res->spinlock);
3070 } 3064 }
3071 3065
3066 spin_lock(&dlm->master_lock);
3072 /* ignore status. only nonzero status would BUG. */ 3067 /* ignore status. only nonzero status would BUG. */
3073 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, 3068 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3074 name, namelen, 3069 name, namelen,
3075 migrate->new_master, 3070 migrate->new_master,
3076 migrate->master); 3071 migrate->master);
3077 3072
3078unlock:
3079 spin_unlock(&dlm->master_lock); 3073 spin_unlock(&dlm->master_lock);
3074unlock:
3080 spin_unlock(&dlm->spinlock); 3075 spin_unlock(&dlm->spinlock);
3081 3076
3082 if (oldmle) { 3077 if (oldmle) {
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 9dfaac73b36d..aaaffbcbe916 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1997 struct list_head *queue; 1997 struct list_head *queue;
1998 struct dlm_lock *lock, *next; 1998 struct dlm_lock *lock, *next;
1999 1999
2000 assert_spin_locked(&dlm->spinlock);
2001 assert_spin_locked(&res->spinlock);
2000 res->state |= DLM_LOCK_RES_RECOVERING; 2002 res->state |= DLM_LOCK_RES_RECOVERING;
2001 if (!list_empty(&res->recovering)) { 2003 if (!list_empty(&res->recovering)) {
2002 mlog(0, 2004 mlog(0,
@@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2326 /* zero the lvb if necessary */ 2328 /* zero the lvb if necessary */
2327 dlm_revalidate_lvb(dlm, res, dead_node); 2329 dlm_revalidate_lvb(dlm, res, dead_node);
2328 if (res->owner == dead_node) { 2330 if (res->owner == dead_node) {
2329 if (res->state & DLM_LOCK_RES_DROPPING_REF) 2331 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2330 mlog(0, "%s:%.*s: owned by " 2332 mlog(ML_NOTICE, "Ignore %.*s for "
2331 "dead node %u, this node was " 2333 "recovery as it is being freed\n",
2332 "dropping its ref when it died. " 2334 res->lockname.len,
2333 "continue, dropping the flag.\n", 2335 res->lockname.name);
2334 dlm->name, res->lockname.len, 2336 } else
2335 res->lockname.name, dead_node); 2337 dlm_move_lockres_to_recovery_list(dlm,
2336 2338 res);
2337 /* the wake_up for this will happen when the
2338 * RECOVERING flag is dropped later */
2339 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
2340 2339
2341 dlm_move_lockres_to_recovery_list(dlm, res);
2342 } else if (res->owner == dlm->node_num) { 2340 } else if (res->owner == dlm->node_num) {
2343 dlm_free_dead_locks(dlm, res, dead_node); 2341 dlm_free_dead_locks(dlm, res, dead_node);
2344 __dlm_lockres_calc_usage(dlm, res); 2342 __dlm_lockres_calc_usage(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index d4f73ca68fe5..2211acf33d9b 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
92 * truly ready to be freed. */ 92 * truly ready to be freed. */
93int __dlm_lockres_unused(struct dlm_lock_resource *res) 93int __dlm_lockres_unused(struct dlm_lock_resource *res)
94{ 94{
95 if (!__dlm_lockres_has_locks(res) && 95 int bit;
96 (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { 96
97 /* try not to scan the bitmap unless the first two 97 if (__dlm_lockres_has_locks(res))
98 * conditions are already true */ 98 return 0;
99 int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 99
100 if (bit >= O2NM_MAX_NODES) { 100 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
101 /* since the bit for dlm->node_num is not 101 return 0;
102 * set, inflight_locks better be zero */ 102
103 BUG_ON(res->inflight_locks != 0); 103 if (res->state & DLM_LOCK_RES_RECOVERING)
104 return 1; 104 return 0;
105 } 105
106 } 106 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
107 return 0; 107 if (bit < O2NM_MAX_NODES)
108 return 0;
109
110 /*
111 * since the bit for dlm->node_num is not set, inflight_locks better
112 * be zero
113 */
114 BUG_ON(res->inflight_locks != 0);
115 return 1;
108} 116}
109 117
110 118
@@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
152 spin_unlock(&dlm->spinlock); 160 spin_unlock(&dlm->spinlock);
153} 161}
154 162
155static int dlm_purge_lockres(struct dlm_ctxt *dlm, 163static void dlm_purge_lockres(struct dlm_ctxt *dlm,
156 struct dlm_lock_resource *res) 164 struct dlm_lock_resource *res)
157{ 165{
158 int master; 166 int master;
159 int ret = 0; 167 int ret = 0;
160 168
161 spin_lock(&res->spinlock); 169 assert_spin_locked(&dlm->spinlock);
162 if (!__dlm_lockres_unused(res)) { 170 assert_spin_locked(&res->spinlock);
163 mlog(0, "%s:%.*s: tried to purge but not unused\n",
164 dlm->name, res->lockname.len, res->lockname.name);
165 __dlm_print_one_lock_resource(res);
166 spin_unlock(&res->spinlock);
167 BUG();
168 }
169
170 if (res->state & DLM_LOCK_RES_MIGRATING) {
171 mlog(0, "%s:%.*s: Delay dropref as this lockres is "
172 "being remastered\n", dlm->name, res->lockname.len,
173 res->lockname.name);
174 /* Re-add the lockres to the end of the purge list */
175 if (!list_empty(&res->purge)) {
176 list_del_init(&res->purge);
177 list_add_tail(&res->purge, &dlm->purge_list);
178 }
179 spin_unlock(&res->spinlock);
180 return 0;
181 }
182 171
183 master = (res->owner == dlm->node_num); 172 master = (res->owner == dlm->node_num);
184 173
185 if (!master)
186 res->state |= DLM_LOCK_RES_DROPPING_REF;
187 spin_unlock(&res->spinlock);
188 174
189 mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, 175 mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
190 res->lockname.name, master); 176 res->lockname.name, master);
191 177
192 if (!master) { 178 if (!master) {
179 res->state |= DLM_LOCK_RES_DROPPING_REF;
193 /* drop spinlock... retake below */ 180 /* drop spinlock... retake below */
181 spin_unlock(&res->spinlock);
194 spin_unlock(&dlm->spinlock); 182 spin_unlock(&dlm->spinlock);
195 183
196 spin_lock(&res->spinlock); 184 spin_lock(&res->spinlock);
@@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
208 mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", 196 mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
209 dlm->name, res->lockname.len, res->lockname.name, ret); 197 dlm->name, res->lockname.len, res->lockname.name, ret);
210 spin_lock(&dlm->spinlock); 198 spin_lock(&dlm->spinlock);
199 spin_lock(&res->spinlock);
211 } 200 }
212 201
213 spin_lock(&res->spinlock);
214 if (!list_empty(&res->purge)) { 202 if (!list_empty(&res->purge)) {
215 mlog(0, "removing lockres %.*s:%p from purgelist, " 203 mlog(0, "removing lockres %.*s:%p from purgelist, "
216 "master = %d\n", res->lockname.len, res->lockname.name, 204 "master = %d\n", res->lockname.len, res->lockname.name,
217 res, master); 205 res, master);
218 list_del_init(&res->purge); 206 list_del_init(&res->purge);
219 spin_unlock(&res->spinlock);
220 dlm_lockres_put(res); 207 dlm_lockres_put(res);
221 dlm->purge_count--; 208 dlm->purge_count--;
222 } else 209 }
223 spin_unlock(&res->spinlock); 210
211 if (!__dlm_lockres_unused(res)) {
212 mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
213 dlm->name, res->lockname.len, res->lockname.name);
214 __dlm_print_one_lock_resource(res);
215 BUG();
216 }
224 217
225 __dlm_unhash_lockres(res); 218 __dlm_unhash_lockres(res);
226 219
227 /* lockres is not in the hash now. drop the flag and wake up 220 /* lockres is not in the hash now. drop the flag and wake up
228 * any processes waiting in dlm_get_lock_resource. */ 221 * any processes waiting in dlm_get_lock_resource. */
229 if (!master) { 222 if (!master) {
230 spin_lock(&res->spinlock);
231 res->state &= ~DLM_LOCK_RES_DROPPING_REF; 223 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
232 spin_unlock(&res->spinlock); 224 spin_unlock(&res->spinlock);
233 wake_up(&res->wq); 225 wake_up(&res->wq);
234 } 226 } else
235 return 0; 227 spin_unlock(&res->spinlock);
236} 228}
237 229
238static void dlm_run_purge_list(struct dlm_ctxt *dlm, 230static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
251 lockres = list_entry(dlm->purge_list.next, 243 lockres = list_entry(dlm->purge_list.next,
252 struct dlm_lock_resource, purge); 244 struct dlm_lock_resource, purge);
253 245
254 /* Status of the lockres *might* change so double
255 * check. If the lockres is unused, holding the dlm
256 * spinlock will prevent people from getting and more
257 * refs on it -- there's no need to keep the lockres
258 * spinlock. */
259 spin_lock(&lockres->spinlock); 246 spin_lock(&lockres->spinlock);
260 unused = __dlm_lockres_unused(lockres);
261 spin_unlock(&lockres->spinlock);
262
263 if (!unused)
264 continue;
265 247
266 purge_jiffies = lockres->last_used + 248 purge_jiffies = lockres->last_used +
267 msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); 249 msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
@@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
273 * in tail order, we can stop at the first 255 * in tail order, we can stop at the first
274 * unpurgable resource -- anyone added after 256 * unpurgable resource -- anyone added after
275 * him will have a greater last_used value */ 257 * him will have a greater last_used value */
258 spin_unlock(&lockres->spinlock);
276 break; 259 break;
277 } 260 }
278 261
262 /* Status of the lockres *might* change so double
263 * check. If the lockres is unused, holding the dlm
264 * spinlock will prevent people from getting and more
265 * refs on it. */
266 unused = __dlm_lockres_unused(lockres);
267 if (!unused ||
268 (lockres->state & DLM_LOCK_RES_MIGRATING)) {
269 mlog(0, "lockres %s:%.*s: is in use or "
270 "being remastered, used %d, state %d\n",
271 dlm->name, lockres->lockname.len,
272 lockres->lockname.name, !unused, lockres->state);
273 list_move_tail(&dlm->purge_list, &lockres->purge);
274 spin_unlock(&lockres->spinlock);
275 continue;
276 }
277
279 dlm_lockres_get(lockres); 278 dlm_lockres_get(lockres);
280 279
281 /* This may drop and reacquire the dlm spinlock if it 280 dlm_purge_lockres(dlm, lockres);
282 * has to do migration. */
283 if (dlm_purge_lockres(dlm, lockres))
284 BUG();
285 281
286 dlm_lockres_put(lockres); 282 dlm_lockres_put(lockres);
287 283
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3ac5aa733e9c..73a11ccfd4c2 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2436 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2436 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2437 le32_to_cpu(rec.r_clusters)) - cpos; 2437 le32_to_cpu(rec.r_clusters)) - cpos;
2438 /* 2438 /*
2439 * If the refcount rec already exist, cool. We just need
2440 * to check whether there is a split. Otherwise we just need
2441 * to increase the refcount.
2442 * If we will insert one, increases recs_add.
2443 *
2444 * We record all the records which will be inserted to the 2439 * We record all the records which will be inserted to the
2445 * same refcount block, so that we can tell exactly whether 2440 * same refcount block, so that we can tell exactly whether
2446 * we need a new refcount block or not. 2441 * we need a new refcount block or not.
2442 *
2443 * If we will insert a new one, this is easy and only happens
2444 * during adding refcounted flag to the extent, so we don't
2445 * have a chance of spliting. We just need one record.
2446 *
2447 * If the refcount rec already exists, that would be a little
2448 * complicated. we may have to:
2449 * 1) split at the beginning if the start pos isn't aligned.
2450 * we need 1 more record in this case.
2451 * 2) split int the end if the end pos isn't aligned.
2452 * we need 1 more record in this case.
2453 * 3) split in the middle because of file system fragmentation.
2454 * we need 2 more records in this case(we can't detect this
2455 * beforehand, so always think of the worst case).
2447 */ 2456 */
2448 if (rec.r_refcount) { 2457 if (rec.r_refcount) {
2458 recs_add += 2;
2449 /* Check whether we need a split at the beginning. */ 2459 /* Check whether we need a split at the beginning. */
2450 if (cpos == start_cpos && 2460 if (cpos == start_cpos &&
2451 cpos != le64_to_cpu(rec.r_cpos)) 2461 cpos != le64_to_cpu(rec.r_cpos))
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 848480bc2bf9..2308fbb4523a 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -129,7 +129,7 @@ static inline void random_ether_addr(u8 *addr)
129/** 129/**
130 * dev_hw_addr_random - Create random MAC and set device flag 130 * dev_hw_addr_random - Create random MAC and set device flag
131 * @dev: pointer to net_device structure 131 * @dev: pointer to net_device structure
132 * @addr: Pointer to a six-byte array containing the Ethernet address 132 * @hwaddr: Pointer to a six-byte array containing the Ethernet address
133 * 133 *
134 * Generate random MAC to be used by a device and set addr_assign_type 134 * Generate random MAC to be used by a device and set addr_assign_type
135 * so the state can be read by sysfs and be used by udev. 135 * so the state can be read by sysfs and be used by udev.
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 413742c92d14..791d5109f34c 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -122,7 +122,7 @@ static inline int netpoll_tx_running(struct net_device *dev)
122} 122}
123 123
124#else 124#else
125static inline int netpoll_rx(struct sk_buff *skb) 125static inline bool netpoll_rx(struct sk_buff *skb)
126{ 126{
127 return 0; 127 return 0;
128} 128}
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 636724b203ee..6c241444f902 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -33,9 +33,9 @@
33#define L2CAP_DEFAULT_FLUSH_TO 0xffff 33#define L2CAP_DEFAULT_FLUSH_TO 0xffff
34#define L2CAP_DEFAULT_TX_WINDOW 63 34#define L2CAP_DEFAULT_TX_WINDOW 63
35#define L2CAP_DEFAULT_MAX_TX 3 35#define L2CAP_DEFAULT_MAX_TX 3
36#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */ 36#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
37#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ 37#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
38#define L2CAP_DEFAULT_MAX_PDU_SIZE 672 38#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
39#define L2CAP_DEFAULT_ACK_TO 200 39#define L2CAP_DEFAULT_ACK_TO 200
40#define L2CAP_LOCAL_BUSY_TRIES 12 40#define L2CAP_LOCAL_BUSY_TRIES 12
41 41
diff --git a/include/net/sock.h b/include/net/sock.h
index a441c9cdd625..ac53bfbdfe16 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -195,7 +195,8 @@ struct sock_common {
195 * @sk_priority: %SO_PRIORITY setting 195 * @sk_priority: %SO_PRIORITY setting
196 * @sk_type: socket type (%SOCK_STREAM, etc) 196 * @sk_type: socket type (%SOCK_STREAM, etc)
197 * @sk_protocol: which protocol this socket belongs in this network family 197 * @sk_protocol: which protocol this socket belongs in this network family
198 * @sk_peercred: %SO_PEERCRED setting 198 * @sk_peer_pid: &struct pid for this socket's peer
199 * @sk_peer_cred: %SO_PEERCRED setting
199 * @sk_rcvlowat: %SO_RCVLOWAT setting 200 * @sk_rcvlowat: %SO_RCVLOWAT setting
200 * @sk_rcvtimeo: %SO_RCVTIMEO setting 201 * @sk_rcvtimeo: %SO_RCVTIMEO setting
201 * @sk_sndtimeo: %SO_SNDTIMEO setting 202 * @sk_sndtimeo: %SO_SNDTIMEO setting
@@ -211,6 +212,7 @@ struct sock_common {
211 * @sk_send_head: front of stuff to transmit 212 * @sk_send_head: front of stuff to transmit
212 * @sk_security: used by security modules 213 * @sk_security: used by security modules
213 * @sk_mark: generic packet mark 214 * @sk_mark: generic packet mark
215 * @sk_classid: this socket's cgroup classid
214 * @sk_write_pending: a write to stream socket waits to start 216 * @sk_write_pending: a write to stream socket waits to start
215 * @sk_state_change: callback to indicate change in the state of the sock 217 * @sk_state_change: callback to indicate change in the state of the sock
216 * @sk_data_ready: callback to indicate there is data to be processed 218 * @sk_data_ready: callback to indicate there is data to be processed
diff --git a/mm/memory.c b/mm/memory.c
index 9606ceb3c165..9b3b73f4ae9c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2792,8 +2792,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2792 spinlock_t *ptl; 2792 spinlock_t *ptl;
2793 pte_t entry; 2793 pte_t entry;
2794 2794
2795 if (check_stack_guard_page(vma, address) < 0) 2795 if (check_stack_guard_page(vma, address) < 0) {
2796 pte_unmap(page_table);
2796 return VM_FAULT_SIGBUS; 2797 return VM_FAULT_SIGBUS;
2798 }
2797 2799
2798 if (!(flags & FAULT_FLAG_WRITE)) { 2800 if (!(flags & FAULT_FLAG_WRITE)) {
2799 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), 2801 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 3e3cd9d4e52c..fadf26b4ed7c 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2705,8 +2705,9 @@ done:
2705 case L2CAP_MODE_ERTM: 2705 case L2CAP_MODE_ERTM:
2706 pi->remote_tx_win = rfc.txwin_size; 2706 pi->remote_tx_win = rfc.txwin_size;
2707 pi->remote_max_tx = rfc.max_transmit; 2707 pi->remote_max_tx = rfc.max_transmit;
2708 if (rfc.max_pdu_size > pi->conn->mtu - 10) 2708
2709 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); 2709 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2710 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2710 2711
2711 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2712 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2712 2713
@@ -2723,8 +2724,8 @@ done:
2723 break; 2724 break;
2724 2725
2725 case L2CAP_MODE_STREAMING: 2726 case L2CAP_MODE_STREAMING:
2726 if (rfc.max_pdu_size > pi->conn->mtu - 10) 2727 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2727 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); 2728 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2728 2729
2729 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2730 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2730 2731
@@ -2806,7 +2807,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2806 if (*result == L2CAP_CONF_SUCCESS) { 2807 if (*result == L2CAP_CONF_SUCCESS) {
2807 switch (rfc.mode) { 2808 switch (rfc.mode) {
2808 case L2CAP_MODE_ERTM: 2809 case L2CAP_MODE_ERTM:
2809 pi->remote_tx_win = rfc.txwin_size;
2810 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 2810 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2811 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 2811 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2812 pi->mps = le16_to_cpu(rfc.max_pdu_size); 2812 pi->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2862,7 +2862,6 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2862done: 2862done:
2863 switch (rfc.mode) { 2863 switch (rfc.mode) {
2864 case L2CAP_MODE_ERTM: 2864 case L2CAP_MODE_ERTM:
2865 pi->remote_tx_win = rfc.txwin_size;
2866 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); 2865 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2867 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); 2866 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2868 pi->mps = le16_to_cpu(rfc.max_pdu_size); 2867 pi->mps = le16_to_cpu(rfc.max_pdu_size);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 01f238ff2346..c49a6695793a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,7 +9,7 @@
9#include <linux/hardirq.h> 9#include <linux/hardirq.h>
10#include <net/caif/cfpkt.h> 10#include <net/caif/cfpkt.h>
11 11
12#define PKT_PREFIX 16 12#define PKT_PREFIX 48
13#define PKT_POSTFIX 2 13#define PKT_POSTFIX 2
14#define PKT_LEN_WHEN_EXTENDING 128 14#define PKT_LEN_WHEN_EXTENDING 128
15#define PKT_ERROR(pkt, errmsg) do { \ 15#define PKT_ERROR(pkt, errmsg) do { \
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9c65e9deb9c3..08ffe9e4be20 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -60,6 +60,13 @@
60#include <net/sock.h> 60#include <net/sock.h>
61#include <net/net_namespace.h> 61#include <net/net_namespace.h>
62 62
63/*
64 * To send multiple CAN frame content within TX_SETUP or to filter
65 * CAN messages with multiplex index within RX_SETUP, the number of
66 * different filters is limited to 256 due to the one byte index value.
67 */
68#define MAX_NFRAMES 256
69
63/* use of last_frames[index].can_dlc */ 70/* use of last_frames[index].can_dlc */
64#define RX_RECV 0x40 /* received data for this element */ 71#define RX_RECV 0x40 /* received data for this element */
65#define RX_THR 0x80 /* element not been sent due to throttle feature */ 72#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
89 struct list_head list; 96 struct list_head list;
90 int ifindex; 97 int ifindex;
91 canid_t can_id; 98 canid_t can_id;
92 int flags; 99 u32 flags;
93 unsigned long frames_abs, frames_filtered; 100 unsigned long frames_abs, frames_filtered;
94 struct timeval ival1, ival2; 101 struct timeval ival1, ival2;
95 struct hrtimer timer, thrtimer; 102 struct hrtimer timer, thrtimer;
96 struct tasklet_struct tsklet, thrtsklet; 103 struct tasklet_struct tsklet, thrtsklet;
97 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; 104 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
98 int rx_ifindex; 105 int rx_ifindex;
99 int count; 106 u32 count;
100 int nframes; 107 u32 nframes;
101 int currframe; 108 u32 currframe;
102 struct can_frame *frames; 109 struct can_frame *frames;
103 struct can_frame *last_frames; 110 struct can_frame *last_frames;
104 struct can_frame sframe; 111 struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
175 182
176 seq_printf(m, "rx_op: %03X %-5s ", 183 seq_printf(m, "rx_op: %03X %-5s ",
177 op->can_id, bcm_proc_getifname(ifname, op->ifindex)); 184 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
178 seq_printf(m, "[%d]%c ", op->nframes, 185 seq_printf(m, "[%u]%c ", op->nframes,
179 (op->flags & RX_CHECK_DLC)?'d':' '); 186 (op->flags & RX_CHECK_DLC)?'d':' ');
180 if (op->kt_ival1.tv64) 187 if (op->kt_ival1.tv64)
181 seq_printf(m, "timeo=%lld ", 188 seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
198 205
199 list_for_each_entry(op, &bo->tx_ops, list) { 206 list_for_each_entry(op, &bo->tx_ops, list) {
200 207
201 seq_printf(m, "tx_op: %03X %s [%d] ", 208 seq_printf(m, "tx_op: %03X %s [%u] ",
202 op->can_id, 209 op->can_id,
203 bcm_proc_getifname(ifname, op->ifindex), 210 bcm_proc_getifname(ifname, op->ifindex),
204 op->nframes); 211 op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
283 struct can_frame *firstframe; 290 struct can_frame *firstframe;
284 struct sockaddr_can *addr; 291 struct sockaddr_can *addr;
285 struct sock *sk = op->sk; 292 struct sock *sk = op->sk;
286 int datalen = head->nframes * CFSIZ; 293 unsigned int datalen = head->nframes * CFSIZ;
287 int err; 294 int err;
288 295
289 skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); 296 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
468 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly 475 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
469 * received data stored in op->last_frames[] 476 * received data stored in op->last_frames[]
470 */ 477 */
471static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, 478static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
472 const struct can_frame *rxdata) 479 const struct can_frame *rxdata)
473{ 480{
474 /* 481 /*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
554/* 561/*
555 * bcm_rx_do_flush - helper for bcm_rx_thr_flush 562 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
556 */ 563 */
557static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) 564static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
565 unsigned int index)
558{ 566{
559 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { 567 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
560 if (update) 568 if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
575 int updated = 0; 583 int updated = 0;
576 584
577 if (op->nframes > 1) { 585 if (op->nframes > 1) {
578 int i; 586 unsigned int i;
579 587
580 /* for MUX filter we start at index 1 */ 588 /* for MUX filter we start at index 1 */
581 for (i = 1; i < op->nframes; i++) 589 for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
624{ 632{
625 struct bcm_op *op = (struct bcm_op *)data; 633 struct bcm_op *op = (struct bcm_op *)data;
626 const struct can_frame *rxframe = (struct can_frame *)skb->data; 634 const struct can_frame *rxframe = (struct can_frame *)skb->data;
627 int i; 635 unsigned int i;
628 636
629 /* disable timeout */ 637 /* disable timeout */
630 hrtimer_cancel(&op->timer); 638 hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
822{ 830{
823 struct bcm_sock *bo = bcm_sk(sk); 831 struct bcm_sock *bo = bcm_sk(sk);
824 struct bcm_op *op; 832 struct bcm_op *op;
825 int i, err; 833 unsigned int i;
834 int err;
826 835
827 /* we need a real device to send frames */ 836 /* we need a real device to send frames */
828 if (!ifindex) 837 if (!ifindex)
829 return -ENODEV; 838 return -ENODEV;
830 839
831 /* we need at least one can_frame */ 840 /* check nframes boundaries - we need at least one can_frame */
832 if (msg_head->nframes < 1) 841 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
833 return -EINVAL; 842 return -EINVAL;
834 843
835 /* check the given can_id */ 844 /* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
993 msg_head->nframes = 0; 1002 msg_head->nframes = 0;
994 } 1003 }
995 1004
1005 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1006 if (msg_head->nframes > MAX_NFRAMES + 1)
1007 return -EINVAL;
1008
996 if ((msg_head->flags & RX_RTR_FRAME) && 1009 if ((msg_head->flags & RX_RTR_FRAME) &&
997 ((msg_head->nframes != 1) || 1010 ((msg_head->nframes != 1) ||
998 (!(msg_head->can_id & CAN_RTR_FLAG)))) 1011 (!(msg_head->can_id & CAN_RTR_FLAG))))
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 400a04d5c9a1..739435a6af39 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/keyctl.h> 30#include <linux/keyctl.h>
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/seq_file.h>
32#include <keys/dns_resolver-type.h> 33#include <keys/dns_resolver-type.h>
33#include <keys/user-type.h> 34#include <keys/user-type.h>
34#include "internal.h" 35#include "internal.h"
@@ -43,6 +44,8 @@ MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
43 44
44const struct cred *dns_resolver_cache; 45const struct cred *dns_resolver_cache;
45 46
47#define DNS_ERRORNO_OPTION "dnserror"
48
46/* 49/*
47 * Instantiate a user defined key for dns_resolver. 50 * Instantiate a user defined key for dns_resolver.
48 * 51 *
@@ -59,9 +62,10 @@ static int
59dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) 62dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
60{ 63{
61 struct user_key_payload *upayload; 64 struct user_key_payload *upayload;
65 unsigned long derrno;
62 int ret; 66 int ret;
63 size_t result_len = 0; 67 size_t result_len = 0;
64 const char *data = _data, *opt; 68 const char *data = _data, *end, *opt;
65 69
66 kenter("%%%d,%s,'%s',%zu", 70 kenter("%%%d,%s,'%s',%zu",
67 key->serial, key->description, data, datalen); 71 key->serial, key->description, data, datalen);
@@ -71,13 +75,77 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
71 datalen--; 75 datalen--;
72 76
73 /* deal with any options embedded in the data */ 77 /* deal with any options embedded in the data */
78 end = data + datalen;
74 opt = memchr(data, '#', datalen); 79 opt = memchr(data, '#', datalen);
75 if (!opt) { 80 if (!opt) {
76 kdebug("no options currently supported"); 81 /* no options: the entire data is the result */
77 return -EINVAL; 82 kdebug("no options");
83 result_len = datalen;
84 } else {
85 const char *next_opt;
86
87 result_len = opt - data;
88 opt++;
89 kdebug("options: '%s'", opt);
90 do {
91 const char *eq;
92 int opt_len, opt_nlen, opt_vlen, tmp;
93
94 next_opt = memchr(opt, '#', end - opt) ?: end;
95 opt_len = next_opt - opt;
96 if (!opt_len) {
97 printk(KERN_WARNING
98 "Empty option to dns_resolver key %d\n",
99 key->serial);
100 return -EINVAL;
101 }
102
103 eq = memchr(opt, '=', opt_len) ?: end;
104 opt_nlen = eq - opt;
105 eq++;
106 opt_vlen = next_opt - eq; /* will be -1 if no value */
107
108 tmp = opt_vlen >= 0 ? opt_vlen : 0;
109 kdebug("option '%*.*s' val '%*.*s'",
110 opt_nlen, opt_nlen, opt, tmp, tmp, eq);
111
112 /* see if it's an error number representing a DNS error
113 * that's to be recorded as the result in this key */
114 if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
115 memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
116 kdebug("dns error number option");
117 if (opt_vlen <= 0)
118 goto bad_option_value;
119
120 ret = strict_strtoul(eq, 10, &derrno);
121 if (ret < 0)
122 goto bad_option_value;
123
124 if (derrno < 1 || derrno > 511)
125 goto bad_option_value;
126
127 kdebug("dns error no. = %lu", derrno);
128 key->type_data.x[0] = -derrno;
129 continue;
130 }
131
132 bad_option_value:
133 printk(KERN_WARNING
134 "Option '%*.*s' to dns_resolver key %d:"
135 " bad/missing value\n",
136 opt_nlen, opt_nlen, opt, key->serial);
137 return -EINVAL;
138 } while (opt = next_opt + 1, opt < end);
139 }
140
141 /* don't cache the result if we're caching an error saying there's no
142 * result */
143 if (key->type_data.x[0]) {
144 kleave(" = 0 [h_error %ld]", key->type_data.x[0]);
145 return 0;
78 } 146 }
79 147
80 result_len = datalen; 148 kdebug("store result");
81 ret = key_payload_reserve(key, result_len); 149 ret = key_payload_reserve(key, result_len);
82 if (ret < 0) 150 if (ret < 0)
83 return -EINVAL; 151 return -EINVAL;
@@ -135,13 +203,27 @@ no_match:
135 return ret; 203 return ret;
136} 204}
137 205
206/*
207 * Describe a DNS key
208 */
209static void dns_resolver_describe(const struct key *key, struct seq_file *m)
210{
211 int err = key->type_data.x[0];
212
213 seq_puts(m, key->description);
214 if (err)
215 seq_printf(m, ": %d", err);
216 else
217 seq_printf(m, ": %u", key->datalen);
218}
219
138struct key_type key_type_dns_resolver = { 220struct key_type key_type_dns_resolver = {
139 .name = "dns_resolver", 221 .name = "dns_resolver",
140 .instantiate = dns_resolver_instantiate, 222 .instantiate = dns_resolver_instantiate,
141 .match = dns_resolver_match, 223 .match = dns_resolver_match,
142 .revoke = user_revoke, 224 .revoke = user_revoke,
143 .destroy = user_destroy, 225 .destroy = user_destroy,
144 .describe = user_describe, 226 .describe = dns_resolver_describe,
145 .read = user_read, 227 .read = user_read,
146}; 228};
147 229
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 03d5255f5cf2..c32be292c7e3 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -136,6 +136,11 @@ int dns_query(const char *type, const char *name, size_t namelen,
136 if (ret < 0) 136 if (ret < 0)
137 goto put; 137 goto put;
138 138
139 /* If the DNS server gave an error, return that to the caller */
140 ret = rkey->type_data.x[0];
141 if (ret)
142 goto put;
143
139 upayload = rcu_dereference_protected(rkey->payload.data, 144 upayload = rcu_dereference_protected(rkey->payload.data,
140 lockdep_is_held(&rkey->sem)); 145 lockdep_is_held(&rkey->sem));
141 len = upayload->datalen; 146 len = upayload->datalen;
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 11201784d29a..87bb5f4de0e8 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
1menuconfig NET_DSA 1menuconfig NET_DSA
2 bool "Distributed Switch Architecture support" 2 bool "Distributed Switch Architecture support"
3 default n 3 default n
4 depends on EXPERIMENTAL && NET_ETHERNET && !S390 4 depends on EXPERIMENTAL && NETDEVICES && !S390
5 select PHYLIB 5 select PHYLIB
6 ---help--- 6 ---help---
7 This allows you to use hardware switch chips that use 7 This allows you to use hardware switch chips that use
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b9e8c3b7d406..408eea7086aa 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
150 if (qops->enqueue == NULL) 150 if (qops->enqueue == NULL)
151 qops->enqueue = noop_qdisc_ops.enqueue; 151 qops->enqueue = noop_qdisc_ops.enqueue;
152 if (qops->peek == NULL) { 152 if (qops->peek == NULL) {
153 if (qops->dequeue == NULL) { 153 if (qops->dequeue == NULL)
154 qops->peek = noop_qdisc_ops.peek; 154 qops->peek = noop_qdisc_ops.peek;
155 } else { 155 else
156 rc = -EINVAL; 156 goto out_einval;
157 goto out;
158 }
159 } 157 }
160 if (qops->dequeue == NULL) 158 if (qops->dequeue == NULL)
161 qops->dequeue = noop_qdisc_ops.dequeue; 159 qops->dequeue = noop_qdisc_ops.dequeue;
162 160
161 if (qops->cl_ops) {
162 const struct Qdisc_class_ops *cops = qops->cl_ops;
163
164 if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 goto out_einval;
166
167 if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 goto out_einval;
169 }
170
163 qops->next = NULL; 171 qops->next = NULL;
164 *qp = qops; 172 *qp = qops;
165 rc = 0; 173 rc = 0;
166out: 174out:
167 write_unlock(&qdisc_mod_lock); 175 write_unlock(&qdisc_mod_lock);
168 return rc; 176 return rc;
177
178out_einval:
179 rc = -EINVAL;
180 goto out;
169} 181}
170EXPORT_SYMBOL(register_qdisc); 182EXPORT_SYMBOL(register_qdisc);
171 183
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e114f23d5eae..340662789529 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
418 } 418 }
419 419
420 ret = qdisc_enqueue(skb, flow->q); 420 ret = qdisc_enqueue(skb, flow->q);
421 if (ret != 0) { 421 if (ret != NET_XMIT_SUCCESS) {
422drop: __maybe_unused 422drop: __maybe_unused
423 if (net_xmit_drop_count(ret)) { 423 if (net_xmit_drop_count(ret)) {
424 sch->qstats.drops++; 424 sch->qstats.drops++;
@@ -442,7 +442,7 @@ drop: __maybe_unused
442 */ 442 */
443 if (flow == &p->link) { 443 if (flow == &p->link) {
444 sch->q.qlen++; 444 sch->q.qlen++;
445 return 0; 445 return NET_XMIT_SUCCESS;
446 } 446 }
447 tasklet_schedule(&p->task); 447 tasklet_schedule(&p->task);
448 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 448 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 534f33231c17..201cbac2b32c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
334 if (++sch->q.qlen <= q->limit) { 334 if (++sch->q.qlen <= q->limit) {
335 sch->bstats.bytes += qdisc_pkt_len(skb); 335 sch->bstats.bytes += qdisc_pkt_len(skb);
336 sch->bstats.packets++; 336 sch->bstats.packets++;
337 return 0; 337 return NET_XMIT_SUCCESS;
338 } 338 }
339 339
340 sfq_drop(sch); 340 sfq_drop(sch);
@@ -508,6 +508,11 @@ nla_put_failure:
508 return -1; 508 return -1;
509} 509}
510 510
511static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
512{
513 return NULL;
514}
515
511static unsigned long sfq_get(struct Qdisc *sch, u32 classid) 516static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
512{ 517{
513 return 0; 518 return 0;
@@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
519 return 0; 524 return 0;
520} 525}
521 526
527static void sfq_put(struct Qdisc *q, unsigned long cl)
528{
529}
530
522static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) 531static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
523{ 532{
524 struct sfq_sched_data *q = qdisc_priv(sch); 533 struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
571} 580}
572 581
573static const struct Qdisc_class_ops sfq_class_ops = { 582static const struct Qdisc_class_ops sfq_class_ops = {
583 .leaf = sfq_leaf,
574 .get = sfq_get, 584 .get = sfq_get,
585 .put = sfq_put,
575 .tcf_chain = sfq_find_tcf, 586 .tcf_chain = sfq_find_tcf,
576 .bind_tcf = sfq_bind, 587 .bind_tcf = sfq_bind,
588 .unbind_tcf = sfq_put,
577 .dump = sfq_dump_class, 589 .dump = sfq_dump_class,
578 .dump_stats = sfq_dump_class_stats, 590 .dump_stats = sfq_dump_class_stats,
579 .walk = sfq_walk, 591 .walk = sfq_walk,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0991c640cd3e..641a30d64635 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
127 return qdisc_reshape_fail(skb, sch); 127 return qdisc_reshape_fail(skb, sch);
128 128
129 ret = qdisc_enqueue(skb, q->qdisc); 129 ret = qdisc_enqueue(skb, q->qdisc);
130 if (ret != 0) { 130 if (ret != NET_XMIT_SUCCESS) {
131 if (net_xmit_drop_count(ret)) 131 if (net_xmit_drop_count(ret))
132 sch->qstats.drops++; 132 sch->qstats.drops++;
133 return ret; 133 return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
136 sch->q.qlen++; 136 sch->q.qlen++;
137 sch->bstats.bytes += qdisc_pkt_len(skb); 137 sch->bstats.bytes += qdisc_pkt_len(skb);
138 sch->bstats.packets++; 138 sch->bstats.packets++;
139 return 0; 139 return NET_XMIT_SUCCESS;
140} 140}
141 141
142static unsigned int tbf_drop(struct Qdisc* sch) 142static unsigned int tbf_drop(struct Qdisc* sch)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 807643bdcbac..feaabc103ce6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
85 __skb_queue_tail(&q->q, skb); 85 __skb_queue_tail(&q->q, skb);
86 sch->bstats.bytes += qdisc_pkt_len(skb); 86 sch->bstats.bytes += qdisc_pkt_len(skb);
87 sch->bstats.packets++; 87 sch->bstats.packets++;
88 return 0; 88 return NET_XMIT_SUCCESS;
89 } 89 }
90 90
91 kfree_skb(skb); 91 kfree_skb(skb);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index e74a1a2119d3..d1a3fb99fdf2 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -843,13 +843,19 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
843 return -EINVAL; 843 return -EINVAL;
844 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { 844 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
845 /* Verify that we are associated with the destination AP */ 845 /* Verify that we are associated with the destination AP */
846 wdev_lock(wdev);
847
846 if (!wdev->current_bss || 848 if (!wdev->current_bss ||
847 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, 849 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
848 ETH_ALEN) != 0 || 850 ETH_ALEN) != 0 ||
849 (wdev->iftype == NL80211_IFTYPE_STATION && 851 (wdev->iftype == NL80211_IFTYPE_STATION &&
850 memcmp(wdev->current_bss->pub.bssid, mgmt->da, 852 memcmp(wdev->current_bss->pub.bssid, mgmt->da,
851 ETH_ALEN) != 0)) 853 ETH_ALEN) != 0)) {
854 wdev_unlock(wdev);
852 return -ENOTCONN; 855 return -ENOTCONN;
856 }
857
858 wdev_unlock(wdev);
853 } 859 }
854 860
855 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) 861 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 26f626d45a9e..41abb90df50d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -157,9 +157,8 @@ all::
157# 157#
158# Define NO_DWARF if you do not want debug-info analysis feature at all. 158# Define NO_DWARF if you do not want debug-info analysis feature at all.
159 159
160$(shell sh -c 'mkdir -p $(OUTPUT)scripts/python/Perf-Trace-Util/' 2> /dev/null) 160$(shell sh -c 'mkdir -p $(OUTPUT)scripts/{perl,python}/Perf-Trace-Util/' 2> /dev/null)
161$(shell sh -c 'mkdir -p $(OUTPUT)scripts/perl/Perf-Trace-Util/' 2> /dev/null) 161$(shell sh -c 'mkdir -p $(OUTPUT)util/{ui/browsers,scripting-engines}/' 2> /dev/null)
162$(shell sh -c 'mkdir -p $(OUTPUT)util/scripting-engines/' 2> /dev/null)
163$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null) 162$(shell sh -c 'mkdir $(OUTPUT)bench' 2> /dev/null)
164 163
165$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 164$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
@@ -568,7 +567,20 @@ else
568 # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h 567 # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
569 BASIC_CFLAGS += -I/usr/include/slang 568 BASIC_CFLAGS += -I/usr/include/slang
570 EXTLIBS += -lnewt -lslang 569 EXTLIBS += -lnewt -lslang
571 LIB_OBJS += $(OUTPUT)util/newt.o 570 LIB_OBJS += $(OUTPUT)util/ui/setup.o
571 LIB_OBJS += $(OUTPUT)util/ui/browser.o
572 LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
573 LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
574 LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
575 LIB_OBJS += $(OUTPUT)util/ui/helpline.o
576 LIB_OBJS += $(OUTPUT)util/ui/progress.o
577 LIB_OBJS += $(OUTPUT)util/ui/util.o
578 LIB_H += util/ui/browser.h
579 LIB_H += util/ui/browsers/map.h
580 LIB_H += util/ui/helpline.h
581 LIB_H += util/ui/libslang.h
582 LIB_H += util/ui/progress.h
583 LIB_H += util/ui/util.h
572 endif 584 endif
573endif 585endif
574 586
@@ -966,7 +978,16 @@ $(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
966$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS 978$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
967 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 979 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
968 980
969$(OUTPUT)util/newt.o: util/newt.c $(OUTPUT)PERF-CFLAGS 981$(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
982 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
983
984$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
985 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
986
987$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
988 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
989
990$(OUTPUT)util/ui/browsers/map.o: util/ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
970 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $< 991 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
971 992
972$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS 993$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index fd20670ce986..1478dc64bf15 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -285,7 +285,7 @@ static int hist_entry__tty_annotate(struct hist_entry *he)
285 LIST_HEAD(head); 285 LIST_HEAD(head);
286 struct objdump_line *pos, *n; 286 struct objdump_line *pos, *n;
287 287
288 if (hist_entry__annotate(he, &head) < 0) 288 if (hist_entry__annotate(he, &head, 0) < 0)
289 return -1; 289 return -1;
290 290
291 if (full_paths) 291 if (full_paths)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2f4b92925b26..55fc1f46892a 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -348,7 +348,18 @@ static int __cmd_report(void)
348 hists__tty_browse_tree(&session->hists_tree, help); 348 hists__tty_browse_tree(&session->hists_tree, help);
349 349
350out_delete: 350out_delete:
351 perf_session__delete(session); 351 /*
352 * Speed up the exit process, for large files this can
353 * take quite a while.
354 *
355 * XXX Enable this when using valgrind or if we ever
356 * librarize this command.
357 *
358 * Also experiment with obstacks to see how much speed
359 * up we'll get here.
360 *
361 * perf_session__delete(session);
362 */
352 return ret; 363 return ret;
353} 364}
354 365
@@ -478,8 +489,24 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
478 * so don't allocate extra space that won't be used in the stdio 489 * so don't allocate extra space that won't be used in the stdio
479 * implementation. 490 * implementation.
480 */ 491 */
481 if (use_browser > 0) 492 if (use_browser > 0) {
482 symbol_conf.priv_size = sizeof(struct sym_priv); 493 symbol_conf.priv_size = sizeof(struct sym_priv);
494 /*
495 * For searching by name on the "Browse map details".
496 * providing it only in verbose mode not to bloat too
497 * much struct symbol.
498 */
499 if (verbose) {
500 /*
501 * XXX: Need to provide a less kludgy way to ask for
502 * more space per symbol, the u32 is for the index on
503 * the ui browser.
504 * See symbol__browser_index.
505 */
506 symbol_conf.priv_size += sizeof(u32);
507 symbol_conf.sort_by_name = true;
508 }
509 }
483 510
484 if (symbol__init() < 0) 511 if (symbol__init() < 0)
485 return -1; 512 return -1;
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 5161619d4714..9bcc38f0b706 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -455,8 +455,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
455 if (p->current->state != TYPE_NONE) 455 if (p->current->state != TYPE_NONE)
456 pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp); 456 pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
457 457
458 p->current->state_since = timestamp; 458 p->current->state_since = timestamp;
459 p->current->state = TYPE_RUNNING; 459 p->current->state = TYPE_RUNNING;
460 } 460 }
461 461
462 if (prev_p->current) { 462 if (prev_p->current) {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 294da725a57d..40a6a2992d15 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1,13 +1,16 @@
1#include "builtin.h" 1#include "builtin.h"
2 2
3#include "util/util.h" 3#include "perf.h"
4#include "util/cache.h" 4#include "util/cache.h"
5#include "util/debug.h"
6#include "util/exec_cmd.h"
7#include "util/header.h"
8#include "util/parse-options.h"
9#include "util/session.h"
5#include "util/symbol.h" 10#include "util/symbol.h"
6#include "util/thread.h" 11#include "util/thread.h"
7#include "util/header.h"
8#include "util/exec_cmd.h"
9#include "util/trace-event.h" 12#include "util/trace-event.h"
10#include "util/session.h" 13#include "util/util.h"
11 14
12static char const *script_name; 15static char const *script_name;
13static char const *generate_script_lang; 16static char const *generate_script_lang;
@@ -59,14 +62,6 @@ static int cleanup_scripting(void)
59 return scripting_ops->stop_script(); 62 return scripting_ops->stop_script();
60} 63}
61 64
62#include "util/parse-options.h"
63
64#include "perf.h"
65#include "util/debug.h"
66
67#include "util/trace-event.h"
68#include "util/exec_cmd.h"
69
70static char const *input_name = "perf.data"; 65static char const *input_name = "perf.data";
71 66
72static int process_sample_event(event_t *event, struct perf_session *session) 67static int process_sample_event(event_t *event, struct perf_session *session)
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 318dab15d177..f9c7e3ad1aa7 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -23,7 +23,7 @@ int eprintf(int level, const char *fmt, ...)
23 if (verbose >= level) { 23 if (verbose >= level) {
24 va_start(args, fmt); 24 va_start(args, fmt);
25 if (use_browser > 0) 25 if (use_browser > 0)
26 ret = browser__show_help(fmt, args); 26 ret = ui_helpline__show_help(fmt, args);
27 else 27 else
28 ret = vfprintf(stderr, fmt, args); 28 ret = vfprintf(stderr, fmt, args);
29 va_end(args); 29 va_end(args);
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 047ac3324ebe..7a17ee061bcb 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -14,7 +14,7 @@ void trace_event(event_t *event);
14struct ui_progress; 14struct ui_progress;
15 15
16#ifdef NO_NEWT_SUPPORT 16#ifdef NO_NEWT_SUPPORT
17static inline int browser__show_help(const char *format __used, va_list ap __used) 17static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
18{ 18{
19 return 0; 19 return 0;
20} 20}
@@ -30,10 +30,9 @@ static inline void ui_progress__update(struct ui_progress *self __used,
30 30
31static inline void ui_progress__delete(struct ui_progress *self __used) {} 31static inline void ui_progress__delete(struct ui_progress *self __used) {}
32#else 32#else
33int browser__show_help(const char *format, va_list ap); 33extern char ui_helpline__last_msg[];
34struct ui_progress *ui_progress__new(const char *title, u64 total); 34int ui_helpline__show_help(const char *format, va_list ap);
35void ui_progress__update(struct ui_progress *self, u64 curr); 35#include "ui/progress.h"
36void ui_progress__delete(struct ui_progress *self);
37#endif 36#endif
38 37
39#endif /* __PERF_DEBUG_H */ 38#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index e7263d49bcf0..be22ae6ef055 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -876,6 +876,9 @@ unsigned int hists__sort_list_width(struct hists *self)
876 if (!se->elide) 876 if (!se->elide)
877 ret += 2 + hists__col_len(self, se->se_width_idx); 877 ret += 2 + hists__col_len(self, se->se_width_idx);
878 878
879 if (verbose) /* Addr + origin */
880 ret += 3 + BITS_PER_LONG / 4;
881
879 return ret; 882 return ret;
880} 883}
881 884
@@ -980,9 +983,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
980 return 0; 983 return 0;
981} 984}
982 985
983static struct objdump_line *objdump_line__new(s64 offset, char *line) 986static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
984{ 987{
985 struct objdump_line *self = malloc(sizeof(*self)); 988 struct objdump_line *self = malloc(sizeof(*self) + privsize);
986 989
987 if (self != NULL) { 990 if (self != NULL) {
988 self->offset = offset; 991 self->offset = offset;
@@ -1014,7 +1017,7 @@ struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
1014} 1017}
1015 1018
1016static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file, 1019static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
1017 struct list_head *head) 1020 struct list_head *head, size_t privsize)
1018{ 1021{
1019 struct symbol *sym = self->ms.sym; 1022 struct symbol *sym = self->ms.sym;
1020 struct objdump_line *objdump_line; 1023 struct objdump_line *objdump_line;
@@ -1065,7 +1068,7 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
1065 offset = -1; 1068 offset = -1;
1066 } 1069 }
1067 1070
1068 objdump_line = objdump_line__new(offset, line); 1071 objdump_line = objdump_line__new(offset, line, privsize);
1069 if (objdump_line == NULL) { 1072 if (objdump_line == NULL) {
1070 free(line); 1073 free(line);
1071 return -1; 1074 return -1;
@@ -1075,7 +1078,8 @@ static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
1075 return 0; 1078 return 0;
1076} 1079}
1077 1080
1078int hist_entry__annotate(struct hist_entry *self, struct list_head *head) 1081int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
1082 size_t privsize)
1079{ 1083{
1080 struct symbol *sym = self->ms.sym; 1084 struct symbol *sym = self->ms.sym;
1081 struct map *map = self->ms.map; 1085 struct map *map = self->ms.map;
@@ -1140,7 +1144,7 @@ fallback:
1140 goto out_free_filename; 1144 goto out_free_filename;
1141 1145
1142 while (!feof(file)) 1146 while (!feof(file))
1143 if (hist_entry__parse_objdump_line(self, file, head) < 0) 1147 if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
1144 break; 1148 break;
1145 1149
1146 pclose(file); 1150 pclose(file);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 65a48db46a29..587d375d3430 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -101,7 +101,8 @@ size_t hists__fprintf(struct hists *self, struct hists *pair,
101 bool show_displacement, FILE *fp); 101 bool show_displacement, FILE *fp);
102 102
103int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip); 103int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
104int hist_entry__annotate(struct hist_entry *self, struct list_head *head); 104int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
105 size_t privsize);
105 106
106void hists__filter_by_dso(struct hists *self, const struct dso *dso); 107void hists__filter_by_dso(struct hists *self, const struct dso *dso);
107void hists__filter_by_thread(struct hists *self, const struct thread *thread); 108void hists__filter_by_thread(struct hists *self, const struct thread *thread);
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
index dbe4b814382a..f5ca26e53fbb 100644
--- a/tools/perf/util/include/linux/list.h
+++ b/tools/perf/util/include/linux/list.h
@@ -15,4 +15,12 @@ static inline void list_del_range(struct list_head *begin,
15 begin->prev->next = end->next; 15 begin->prev->next = end->next;
16 end->next->prev = begin->prev; 16 end->next->prev = begin->prev;
17} 17}
18
19/**
20 * list_for_each_from - iterate over a list from one of its nodes
21 * @pos: the &struct list_head to use as a loop cursor, from where to start
22 * @head: the head for your list.
23 */
24#define list_for_each_from(pos, head) \
25 for (; prefetch(pos->next), pos != (head); pos = pos->next)
18#endif 26#endif
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h
index 196862a81a21..12de3b8112f9 100644
--- a/tools/perf/util/include/linux/types.h
+++ b/tools/perf/util/include/linux/types.h
@@ -6,4 +6,16 @@
6#define DECLARE_BITMAP(name,bits) \ 6#define DECLARE_BITMAP(name,bits) \
7 unsigned long name[BITS_TO_LONGS(bits)] 7 unsigned long name[BITS_TO_LONGS(bits)]
8 8
9struct list_head {
10 struct list_head *next, *prev;
11};
12
13struct hlist_head {
14 struct hlist_node *first;
15};
16
17struct hlist_node {
18 struct hlist_node *next, **pprev;
19};
20
9#endif 21#endif
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 2e665cb84055..e72f05c3bef0 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1606,8 +1606,10 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
1606 1606
1607 /* Init vmlinux path */ 1607 /* Init vmlinux path */
1608 ret = init_vmlinux(); 1608 ret = init_vmlinux();
1609 if (ret < 0) 1609 if (ret < 0) {
1610 free(pkgs);
1610 return ret; 1611 return ret;
1612 }
1611 1613
1612 /* Loop 1: convert all events */ 1614 /* Loop 1: convert all events */
1613 for (i = 0; i < npevs; i++) { 1615 for (i = 0; i < npevs; i++) {
@@ -1625,10 +1627,13 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
1625 ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs, 1627 ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
1626 pkgs[i].ntevs, force_add); 1628 pkgs[i].ntevs, force_add);
1627end: 1629end:
1628 /* Loop 3: cleanup trace events */ 1630 /* Loop 3: cleanup and free trace events */
1629 for (i = 0; i < npevs; i++) 1631 for (i = 0; i < npevs; i++) {
1630 for (j = 0; j < pkgs[i].ntevs; j++) 1632 for (j = 0; j < pkgs[i].ntevs; j++)
1631 clear_probe_trace_event(&pkgs[i].tevs[j]); 1633 clear_probe_trace_event(&pkgs[i].tevs[j]);
1634 free(pkgs[i].tevs);
1635 }
1636 free(pkgs);
1632 1637
1633 return ret; 1638 return ret;
1634} 1639}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 840f1aabbb74..525136684d4e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -33,7 +33,6 @@
33#include <ctype.h> 33#include <ctype.h>
34#include <dwarf-regs.h> 34#include <dwarf-regs.h>
35 35
36#include "string.h"
37#include "event.h" 36#include "event.h"
38#include "debug.h" 37#include "debug.h"
39#include "util.h" 38#include "util.h"
@@ -706,8 +705,12 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
706 pf->tvar->value = strdup(pf->pvar->var); 705 pf->tvar->value = strdup(pf->pvar->var);
707 if (pf->tvar->value == NULL) 706 if (pf->tvar->value == NULL)
708 return -ENOMEM; 707 return -ENOMEM;
709 else 708 if (pf->pvar->type) {
710 return 0; 709 pf->tvar->type = strdup(pf->pvar->type);
710 if (pf->tvar->type == NULL)
711 return -ENOMEM;
712 }
713 return 0;
711 } 714 }
712 715
713 pr_debug("Searching '%s' variable in context.\n", 716 pr_debug("Searching '%s' variable in context.\n",
diff --git a/tools/perf/util/pstack.h b/tools/perf/util/pstack.h
index 5ad07023504b..4cedea59f518 100644
--- a/tools/perf/util/pstack.h
+++ b/tools/perf/util/pstack.h
@@ -1,6 +1,8 @@
1#ifndef _PERF_PSTACK_ 1#ifndef _PERF_PSTACK_
2#define _PERF_PSTACK_ 2#define _PERF_PSTACK_
3 3
4#include <stdbool.h>
5
4struct pstack; 6struct pstack;
5struct pstack *pstack__new(unsigned short max_nr_entries); 7struct pstack *pstack__new(unsigned short max_nr_entries);
6void pstack__delete(struct pstack *self); 8void pstack__delete(struct pstack *self);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 1c61a4f4aa8a..b62a553cc67d 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -196,7 +196,8 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
196 196
197 if (verbose) { 197 if (verbose) {
198 char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!'; 198 char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
199 ret += repsep_snprintf(bf, size, "%#018llx %c ", self->ip, o); 199 ret += repsep_snprintf(bf, size, "%*Lx %c ",
200 BITS_PER_LONG / 4, self->ip, o);
200 } 201 }
201 202
202 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level); 203 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
@@ -204,7 +205,8 @@ static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
204 ret += repsep_snprintf(bf + ret, size - ret, "%s", 205 ret += repsep_snprintf(bf + ret, size - ret, "%s",
205 self->ms.sym->name); 206 self->ms.sym->name);
206 else 207 else
207 ret += repsep_snprintf(bf + ret, size - ret, "%#016llx", self->ip); 208 ret += repsep_snprintf(bf + ret, size - ret, "%*Lx",
209 BITS_PER_LONG / 4, self->ip);
208 210
209 return ret; 211 return ret;
210} 212}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 6f0dd90c36ce..1a367734e016 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -131,7 +131,8 @@ static void map_groups__fixup_end(struct map_groups *self)
131 __map_groups__fixup_end(self, i); 131 __map_groups__fixup_end(self, i);
132} 132}
133 133
134static struct symbol *symbol__new(u64 start, u64 len, const char *name) 134static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
135 const char *name)
135{ 136{
136 size_t namelen = strlen(name) + 1; 137 size_t namelen = strlen(name) + 1;
137 struct symbol *self = calloc(1, (symbol_conf.priv_size + 138 struct symbol *self = calloc(1, (symbol_conf.priv_size +
@@ -144,6 +145,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
144 145
145 self->start = start; 146 self->start = start;
146 self->end = len ? start + len - 1 : start; 147 self->end = len ? start + len - 1 : start;
148 self->binding = binding;
147 self->namelen = namelen - 1; 149 self->namelen = namelen - 1;
148 150
149 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); 151 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
@@ -160,8 +162,11 @@ void symbol__delete(struct symbol *self)
160 162
161static size_t symbol__fprintf(struct symbol *self, FILE *fp) 163static size_t symbol__fprintf(struct symbol *self, FILE *fp)
162{ 164{
163 return fprintf(fp, " %llx-%llx %s\n", 165 return fprintf(fp, " %llx-%llx %c %s\n",
164 self->start, self->end, self->name); 166 self->start, self->end,
167 self->binding == STB_GLOBAL ? 'g' :
168 self->binding == STB_LOCAL ? 'l' : 'w',
169 self->name);
165} 170}
166 171
167void dso__set_long_name(struct dso *self, char *name) 172void dso__set_long_name(struct dso *self, char *name)
@@ -453,6 +458,14 @@ struct process_kallsyms_args {
453 struct dso *dso; 458 struct dso *dso;
454}; 459};
455 460
461static u8 kallsyms2elf_type(char type)
462{
463 if (type == 'W')
464 return STB_WEAK;
465
466 return isupper(type) ? STB_GLOBAL : STB_LOCAL;
467}
468
456static int map__process_kallsym_symbol(void *arg, const char *name, 469static int map__process_kallsym_symbol(void *arg, const char *name,
457 char type, u64 start) 470 char type, u64 start)
458{ 471{
@@ -466,7 +479,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
466 /* 479 /*
467 * Will fix up the end later, when we have all symbols sorted. 480 * Will fix up the end later, when we have all symbols sorted.
468 */ 481 */
469 sym = symbol__new(start, 0, name); 482 sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
470 483
471 if (sym == NULL) 484 if (sym == NULL)
472 return -ENOMEM; 485 return -ENOMEM;
@@ -661,7 +674,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map,
661 if (len + 2 >= line_len) 674 if (len + 2 >= line_len)
662 continue; 675 continue;
663 676
664 sym = symbol__new(start, size, line + len); 677 sym = symbol__new(start, size, STB_GLOBAL, line + len);
665 678
666 if (sym == NULL) 679 if (sym == NULL)
667 goto out_delete_line; 680 goto out_delete_line;
@@ -873,7 +886,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
873 "%s@plt", elf_sym__name(&sym, symstrs)); 886 "%s@plt", elf_sym__name(&sym, symstrs));
874 887
875 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 888 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
876 sympltname); 889 STB_GLOBAL, sympltname);
877 if (!f) 890 if (!f)
878 goto out_elf_end; 891 goto out_elf_end;
879 892
@@ -895,7 +908,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
895 "%s@plt", elf_sym__name(&sym, symstrs)); 908 "%s@plt", elf_sym__name(&sym, symstrs));
896 909
897 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 910 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
898 sympltname); 911 STB_GLOBAL, sympltname);
899 if (!f) 912 if (!f)
900 goto out_elf_end; 913 goto out_elf_end;
901 914
@@ -1066,6 +1079,16 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1066 if (!is_label && !elf_sym__is_a(&sym, map->type)) 1079 if (!is_label && !elf_sym__is_a(&sym, map->type))
1067 continue; 1080 continue;
1068 1081
1082 /* Reject ARM ELF "mapping symbols": these aren't unique and
1083 * don't identify functions, so will confuse the profile
1084 * output: */
1085 if (ehdr.e_machine == EM_ARM) {
1086 if (!strcmp(elf_name, "$a") ||
1087 !strcmp(elf_name, "$d") ||
1088 !strcmp(elf_name, "$t"))
1089 continue;
1090 }
1091
1069 if (opdsec && sym.st_shndx == opdidx) { 1092 if (opdsec && sym.st_shndx == opdidx) {
1070 u32 offset = sym.st_value - opdshdr.sh_addr; 1093 u32 offset = sym.st_value - opdshdr.sh_addr;
1071 u64 *opd = opddata->d_buf + offset; 1094 u64 *opd = opddata->d_buf + offset;
@@ -1146,7 +1169,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
1146 if (demangled != NULL) 1169 if (demangled != NULL)
1147 elf_name = demangled; 1170 elf_name = demangled;
1148new_symbol: 1171new_symbol:
1149 f = symbol__new(sym.st_value, sym.st_size, elf_name); 1172 f = symbol__new(sym.st_value, sym.st_size,
1173 GELF_ST_BIND(sym.st_info), elf_name);
1150 free(demangled); 1174 free(demangled);
1151 if (!f) 1175 if (!f)
1152 goto out_elf_end; 1176 goto out_elf_end;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 906be20011d9..b7a8da4af5a0 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -53,6 +53,7 @@ struct symbol {
53 u64 start; 53 u64 start;
54 u64 end; 54 u64 end;
55 u16 namelen; 55 u16 namelen;
56 u8 binding;
56 char name[0]; 57 char name[0];
57}; 58};
58 59
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
new file mode 100644
index 000000000000..66f2d583d8c4
--- /dev/null
+++ b/tools/perf/util/ui/browser.c
@@ -0,0 +1,329 @@
1#define _GNU_SOURCE
2#include <stdio.h>
3#undef _GNU_SOURCE
4/*
5 * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
6 * the build if it isn't defined. Use the equivalent one that glibc
7 * has on features.h.
8 */
9#include <features.h>
10#ifndef HAVE_LONG_LONG
11#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
12#endif
13#include <slang.h>
14#include <linux/list.h>
15#include <linux/rbtree.h>
16#include <stdlib.h>
17#include <sys/ttydefaults.h>
18#include "browser.h"
19#include "helpline.h"
20#include "../color.h"
21#include "../util.h"
22
23#if SLANG_VERSION < 20104
24#define sltt_set_color(obj, name, fg, bg) \
25 SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
26#else
27#define sltt_set_color SLtt_set_color
28#endif
29
30newtComponent newt_form__new(void);
31
32int ui_browser__percent_color(double percent, bool current)
33{
34 if (current)
35 return HE_COLORSET_SELECTED;
36 if (percent >= MIN_RED)
37 return HE_COLORSET_TOP;
38 if (percent >= MIN_GREEN)
39 return HE_COLORSET_MEDIUM;
40 return HE_COLORSET_NORMAL;
41}
42
43void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
44{
45 struct list_head *head = self->entries;
46 struct list_head *pos;
47
48 switch (whence) {
49 case SEEK_SET:
50 pos = head->next;
51 break;
52 case SEEK_CUR:
53 pos = self->top;
54 break;
55 case SEEK_END:
56 pos = head->prev;
57 break;
58 default:
59 return;
60 }
61
62 if (offset > 0) {
63 while (offset-- != 0)
64 pos = pos->next;
65 } else {
66 while (offset++ != 0)
67 pos = pos->prev;
68 }
69
70 self->top = pos;
71}
72
73void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
74{
75 struct rb_root *root = self->entries;
76 struct rb_node *nd;
77
78 switch (whence) {
79 case SEEK_SET:
80 nd = rb_first(root);
81 break;
82 case SEEK_CUR:
83 nd = self->top;
84 break;
85 case SEEK_END:
86 nd = rb_last(root);
87 break;
88 default:
89 return;
90 }
91
92 if (offset > 0) {
93 while (offset-- != 0)
94 nd = rb_next(nd);
95 } else {
96 while (offset++ != 0)
97 nd = rb_prev(nd);
98 }
99
100 self->top = nd;
101}
102
103unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
104{
105 struct rb_node *nd;
106 int row = 0;
107
108 if (self->top == NULL)
109 self->top = rb_first(self->entries);
110
111 nd = self->top;
112
113 while (nd != NULL) {
114 SLsmg_gotorc(self->y + row, self->x);
115 self->write(self, nd, row);
116 if (++row == self->height)
117 break;
118 nd = rb_next(nd);
119 }
120
121 return row;
122}
123
124bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
125{
126 return self->top_idx + row == self->index;
127}
128
129void ui_browser__refresh_dimensions(struct ui_browser *self)
130{
131 int cols, rows;
132 newtGetScreenSize(&cols, &rows);
133
134 if (self->width > cols - 4)
135 self->width = cols - 4;
136 self->height = rows - 5;
137 if (self->height > self->nr_entries)
138 self->height = self->nr_entries;
139 self->y = (rows - self->height) / 2;
140 self->x = (cols - self->width) / 2;
141}
142
143void ui_browser__reset_index(struct ui_browser *self)
144{
145 self->index = self->top_idx = 0;
146 self->seek(self, 0, SEEK_SET);
147}
148
149int ui_browser__show(struct ui_browser *self, const char *title,
150 const char *helpline, ...)
151{
152 va_list ap;
153
154 if (self->form != NULL) {
155 newtFormDestroy(self->form);
156 newtPopWindow();
157 }
158 ui_browser__refresh_dimensions(self);
159 newtCenteredWindow(self->width, self->height, title);
160 self->form = newt_form__new();
161 if (self->form == NULL)
162 return -1;
163
164 self->sb = newtVerticalScrollbar(self->width, 0, self->height,
165 HE_COLORSET_NORMAL,
166 HE_COLORSET_SELECTED);
167 if (self->sb == NULL)
168 return -1;
169
170 newtFormAddHotKey(self->form, NEWT_KEY_UP);
171 newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
172 newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
173 newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
174 newtFormAddHotKey(self->form, NEWT_KEY_HOME);
175 newtFormAddHotKey(self->form, NEWT_KEY_END);
176 newtFormAddHotKey(self->form, ' ');
177 newtFormAddComponent(self->form, self->sb);
178
179 va_start(ap, helpline);
180 ui_helpline__vpush(helpline, ap);
181 va_end(ap);
182 return 0;
183}
184
185void ui_browser__hide(struct ui_browser *self)
186{
187 newtFormDestroy(self->form);
188 newtPopWindow();
189 self->form = NULL;
190 ui_helpline__pop();
191}
192
193int ui_browser__refresh(struct ui_browser *self)
194{
195 int row;
196
197 newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
198 row = self->refresh(self);
199 SLsmg_set_color(HE_COLORSET_NORMAL);
200 SLsmg_fill_region(self->y + row, self->x,
201 self->height - row, self->width, ' ');
202
203 return 0;
204}
205
206int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
207{
208 if (ui_browser__refresh(self) < 0)
209 return -1;
210
211 while (1) {
212 off_t offset;
213
214 newtFormRun(self->form, es);
215
216 if (es->reason != NEWT_EXIT_HOTKEY)
217 break;
218 if (is_exit_key(es->u.key))
219 return es->u.key;
220 switch (es->u.key) {
221 case NEWT_KEY_DOWN:
222 if (self->index == self->nr_entries - 1)
223 break;
224 ++self->index;
225 if (self->index == self->top_idx + self->height) {
226 ++self->top_idx;
227 self->seek(self, +1, SEEK_CUR);
228 }
229 break;
230 case NEWT_KEY_UP:
231 if (self->index == 0)
232 break;
233 --self->index;
234 if (self->index < self->top_idx) {
235 --self->top_idx;
236 self->seek(self, -1, SEEK_CUR);
237 }
238 break;
239 case NEWT_KEY_PGDN:
240 case ' ':
241 if (self->top_idx + self->height > self->nr_entries - 1)
242 break;
243
244 offset = self->height;
245 if (self->index + offset > self->nr_entries - 1)
246 offset = self->nr_entries - 1 - self->index;
247 self->index += offset;
248 self->top_idx += offset;
249 self->seek(self, +offset, SEEK_CUR);
250 break;
251 case NEWT_KEY_PGUP:
252 if (self->top_idx == 0)
253 break;
254
255 if (self->top_idx < self->height)
256 offset = self->top_idx;
257 else
258 offset = self->height;
259
260 self->index -= offset;
261 self->top_idx -= offset;
262 self->seek(self, -offset, SEEK_CUR);
263 break;
264 case NEWT_KEY_HOME:
265 ui_browser__reset_index(self);
266 break;
267 case NEWT_KEY_END:
268 offset = self->height - 1;
269 if (offset >= self->nr_entries)
270 offset = self->nr_entries - 1;
271
272 self->index = self->nr_entries - 1;
273 self->top_idx = self->index - offset;
274 self->seek(self, -offset, SEEK_END);
275 break;
276 default:
277 return es->u.key;
278 }
279 if (ui_browser__refresh(self) < 0)
280 return -1;
281 }
282 return 0;
283}
284
285unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
286{
287 struct list_head *pos;
288 struct list_head *head = self->entries;
289 int row = 0;
290
291 if (self->top == NULL || self->top == self->entries)
292 self->top = head->next;
293
294 pos = self->top;
295
296 list_for_each_from(pos, head) {
297 SLsmg_gotorc(self->y + row, self->x);
298 self->write(self, pos, row);
299 if (++row == self->height)
300 break;
301 }
302
303 return row;
304}
305
306static struct newtPercentTreeColors {
307 const char *topColorFg, *topColorBg;
308 const char *mediumColorFg, *mediumColorBg;
309 const char *normalColorFg, *normalColorBg;
310 const char *selColorFg, *selColorBg;
311 const char *codeColorFg, *codeColorBg;
312} defaultPercentTreeColors = {
313 "red", "lightgray",
314 "green", "lightgray",
315 "black", "lightgray",
316 "lightgray", "magenta",
317 "blue", "lightgray",
318};
319
320void ui_browser__init(void)
321{
322 struct newtPercentTreeColors *c = &defaultPercentTreeColors;
323
324 sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg);
325 sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg);
326 sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg);
327 sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg);
328 sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg);
329}
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
new file mode 100644
index 000000000000..0b9f829214f7
--- /dev/null
+++ b/tools/perf/util/ui/browser.h
@@ -0,0 +1,46 @@
1#ifndef _PERF_UI_BROWSER_H_
2#define _PERF_UI_BROWSER_H_ 1
3
4#include <stdbool.h>
5#include <newt.h>
6#include <sys/types.h>
7#include "../types.h"
8
9#define HE_COLORSET_TOP 50
10#define HE_COLORSET_MEDIUM 51
11#define HE_COLORSET_NORMAL 52
12#define HE_COLORSET_SELECTED 53
13#define HE_COLORSET_CODE 54
14
15struct ui_browser {
16 newtComponent form, sb;
17 u64 index, top_idx;
18 void *top, *entries;
19 u16 y, x, width, height;
20 void *priv;
21 unsigned int (*refresh)(struct ui_browser *self);
22 void (*write)(struct ui_browser *self, void *entry, int row);
23 void (*seek)(struct ui_browser *self, off_t offset, int whence);
24 u32 nr_entries;
25};
26
27
28int ui_browser__percent_color(double percent, bool current);
29bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
30void ui_browser__refresh_dimensions(struct ui_browser *self);
31void ui_browser__reset_index(struct ui_browser *self);
32
33int ui_browser__show(struct ui_browser *self, const char *title,
34 const char *helpline, ...);
35void ui_browser__hide(struct ui_browser *self);
36int ui_browser__refresh(struct ui_browser *self);
37int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es);
38
39void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
40unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
41
42void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence);
43unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
44
45void ui_browser__init(void);
46#endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
new file mode 100644
index 000000000000..55ff792459ac
--- /dev/null
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -0,0 +1,240 @@
1#include "../browser.h"
2#include "../helpline.h"
3#include "../libslang.h"
4#include "../../hist.h"
5#include "../../sort.h"
6#include "../../symbol.h"
7
8static void ui__error_window(const char *fmt, ...)
9{
10 va_list ap;
11
12 va_start(ap, fmt);
13 newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
14 va_end(ap);
15}
16
17struct annotate_browser {
18 struct ui_browser b;
19 struct rb_root entries;
20 struct rb_node *curr_hot;
21};
22
23struct objdump_line_rb_node {
24 struct rb_node rb_node;
25 double percent;
26 u32 idx;
27};
28
29static inline
30struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
31{
32 return (struct objdump_line_rb_node *)(self + 1);
33}
34
35static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
36{
37 struct objdump_line *ol = rb_entry(entry, struct objdump_line, node);
38 bool current_entry = ui_browser__is_current_entry(self, row);
39 int width = self->width;
40
41 if (ol->offset != -1) {
42 struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
43 int color = ui_browser__percent_color(olrb->percent, current_entry);
44 SLsmg_set_color(color);
45 slsmg_printf(" %7.2f ", olrb->percent);
46 if (!current_entry)
47 SLsmg_set_color(HE_COLORSET_CODE);
48 } else {
49 int color = ui_browser__percent_color(0, current_entry);
50 SLsmg_set_color(color);
51 slsmg_write_nstring(" ", 9);
52 }
53
54 SLsmg_write_char(':');
55 slsmg_write_nstring(" ", 8);
56 if (!*ol->line)
57 slsmg_write_nstring(" ", width - 18);
58 else
59 slsmg_write_nstring(ol->line, width - 18);
60}
61
62static double objdump_line__calc_percent(struct objdump_line *self,
63 struct list_head *head,
64 struct symbol *sym)
65{
66 double percent = 0.0;
67
68 if (self->offset != -1) {
69 int len = sym->end - sym->start;
70 unsigned int hits = 0;
71 struct sym_priv *priv = symbol__priv(sym);
72 struct sym_ext *sym_ext = priv->ext;
73 struct sym_hist *h = priv->hist;
74 s64 offset = self->offset;
75 struct objdump_line *next = objdump__get_next_ip_line(head, self);
76
77
78 while (offset < (s64)len &&
79 (next == NULL || offset < next->offset)) {
80 if (sym_ext) {
81 percent += sym_ext[offset].percent;
82 } else
83 hits += h->ip[offset];
84
85 ++offset;
86 }
87
88 if (sym_ext == NULL && h->sum)
89 percent = 100.0 * hits / h->sum;
90 }
91
92 return percent;
93}
94
95static void objdump__insert_line(struct rb_root *self,
96 struct objdump_line_rb_node *line)
97{
98 struct rb_node **p = &self->rb_node;
99 struct rb_node *parent = NULL;
100 struct objdump_line_rb_node *l;
101
102 while (*p != NULL) {
103 parent = *p;
104 l = rb_entry(parent, struct objdump_line_rb_node, rb_node);
105 if (line->percent < l->percent)
106 p = &(*p)->rb_left;
107 else
108 p = &(*p)->rb_right;
109 }
110 rb_link_node(&line->rb_node, parent, p);
111 rb_insert_color(&line->rb_node, self);
112}
113
114static void annotate_browser__set_top(struct annotate_browser *self,
115 struct rb_node *nd)
116{
117 struct objdump_line_rb_node *rbpos;
118 struct objdump_line *pos;
119 unsigned back;
120
121 ui_browser__refresh_dimensions(&self->b);
122 back = self->b.height / 2;
123 rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
124 pos = ((struct objdump_line *)rbpos) - 1;
125 self->b.top_idx = self->b.index = rbpos->idx;
126
127 while (self->b.top_idx != 0 && back != 0) {
128 pos = list_entry(pos->node.prev, struct objdump_line, node);
129
130 --self->b.top_idx;
131 --back;
132 }
133
134 self->b.top = pos;
135 self->curr_hot = nd;
136}
137
138static int annotate_browser__run(struct annotate_browser *self,
139 struct newtExitStruct *es)
140{
141 struct rb_node *nd;
142 struct hist_entry *he = self->b.priv;
143
144 if (ui_browser__show(&self->b, he->ms.sym->name,
145 "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
146 return -1;
147
148 newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
149
150 nd = self->curr_hot;
151 if (nd) {
152 newtFormAddHotKey(self->b.form, NEWT_KEY_TAB);
153 newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB);
154 }
155
156 while (1) {
157 ui_browser__run(&self->b, es);
158
159 if (es->reason != NEWT_EXIT_HOTKEY)
160 break;
161
162 switch (es->u.key) {
163 case NEWT_KEY_TAB:
164 nd = rb_prev(nd);
165 if (nd == NULL)
166 nd = rb_last(&self->entries);
167 annotate_browser__set_top(self, nd);
168 break;
169 case NEWT_KEY_UNTAB:
170 nd = rb_next(nd);
171 if (nd == NULL)
172 nd = rb_first(&self->entries);
173 annotate_browser__set_top(self, nd);
174 break;
175 default:
176 goto out;
177 }
178 }
179out:
180 ui_browser__hide(&self->b);
181 return 0;
182}
183
184int hist_entry__tui_annotate(struct hist_entry *self)
185{
186 struct newtExitStruct es;
187 struct objdump_line *pos, *n;
188 struct objdump_line_rb_node *rbpos;
189 LIST_HEAD(head);
190 struct annotate_browser browser = {
191 .b = {
192 .entries = &head,
193 .refresh = ui_browser__list_head_refresh,
194 .seek = ui_browser__list_head_seek,
195 .write = annotate_browser__write,
196 .priv = self,
197 },
198 };
199 int ret;
200
201 if (self->ms.sym == NULL)
202 return -1;
203
204 if (self->ms.map->dso->annotate_warned)
205 return -1;
206
207 if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
208 ui__error_window(ui_helpline__last_msg);
209 return -1;
210 }
211
212 ui_helpline__push("Press <- or ESC to exit");
213
214 list_for_each_entry(pos, &head, node) {
215 size_t line_len = strlen(pos->line);
216 if (browser.b.width < line_len)
217 browser.b.width = line_len;
218 rbpos = objdump_line__rb(pos);
219 rbpos->idx = browser.b.nr_entries++;
220 rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
221 if (rbpos->percent < 0.01)
222 continue;
223 objdump__insert_line(&browser.entries, rbpos);
224 }
225
226 /*
227 * Position the browser at the hottest line.
228 */
229 browser.curr_hot = rb_last(&browser.entries);
230 if (browser.curr_hot)
231 annotate_browser__set_top(&browser, browser.curr_hot);
232
233 browser.b.width += 18; /* Percentage */
234 ret = annotate_browser__run(&browser, &es);
235 list_for_each_entry_safe(pos, n, &head, node) {
236 list_del(&pos->node);
237 objdump_line__free(pos);
238 }
239 return ret;
240}
diff --git a/tools/perf/util/newt.c b/tools/perf/util/ui/browsers/hists.c
index 91de99b58445..dafdf6775d77 100644
--- a/tools/perf/util/newt.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -1,978 +1,272 @@
1#define _GNU_SOURCE 1#define _GNU_SOURCE
2#include <stdio.h> 2#include <stdio.h>
3#undef _GNU_SOURCE 3#undef _GNU_SOURCE
4/* 4#include "../libslang.h"
5 * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
6 * the build if it isn't defined. Use the equivalent one that glibc
7 * has on features.h.
8 */
9#include <features.h>
10#ifndef HAVE_LONG_LONG
11#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
12#endif
13#include <slang.h>
14#include <signal.h>
15#include <stdlib.h> 5#include <stdlib.h>
6#include <string.h>
16#include <newt.h> 7#include <newt.h>
17#include <sys/ttydefaults.h> 8#include <linux/rbtree.h>
18
19#include "cache.h"
20#include "hist.h"
21#include "pstack.h"
22#include "session.h"
23#include "sort.h"
24#include "symbol.h"
25
26#if SLANG_VERSION < 20104
27#define slsmg_printf(msg, args...) SLsmg_printf((char *)msg, ##args)
28#define slsmg_write_nstring(msg, len) SLsmg_write_nstring((char *)msg, len)
29#define sltt_set_color(obj, name, fg, bg) SLtt_set_color(obj,(char *)name,\
30 (char *)fg, (char *)bg)
31#else
32#define slsmg_printf SLsmg_printf
33#define slsmg_write_nstring SLsmg_write_nstring
34#define sltt_set_color SLtt_set_color
35#endif
36
37struct ui_progress {
38 newtComponent form, scale;
39};
40
41struct ui_progress *ui_progress__new(const char *title, u64 total)
42{
43 struct ui_progress *self = malloc(sizeof(*self));
44
45 if (self != NULL) {
46 int cols;
47
48 if (use_browser <= 0)
49 return self;
50 newtGetScreenSize(&cols, NULL);
51 cols -= 4;
52 newtCenteredWindow(cols, 1, title);
53 self->form = newtForm(NULL, NULL, 0);
54 if (self->form == NULL)
55 goto out_free_self;
56 self->scale = newtScale(0, 0, cols, total);
57 if (self->scale == NULL)
58 goto out_free_form;
59 newtFormAddComponent(self->form, self->scale);
60 newtRefresh();
61 }
62
63 return self;
64
65out_free_form:
66 newtFormDestroy(self->form);
67out_free_self:
68 free(self);
69 return NULL;
70}
71
72void ui_progress__update(struct ui_progress *self, u64 curr)
73{
74 /*
75 * FIXME: We should have a per UI backend way of showing progress,
76 * stdio will just show a percentage as NN%, etc.
77 */
78 if (use_browser <= 0)
79 return;
80 newtScaleSet(self->scale, curr);
81 newtRefresh();
82}
83 9
84void ui_progress__delete(struct ui_progress *self) 10#include "../../hist.h"
85{ 11#include "../../pstack.h"
86 if (use_browser > 0) { 12#include "../../sort.h"
87 newtFormDestroy(self->form); 13#include "../../util.h"
88 newtPopWindow();
89 }
90 free(self);
91}
92 14
93static void ui_helpline__pop(void) 15#include "../browser.h"
94{ 16#include "../helpline.h"
95 newtPopHelpLine(); 17#include "../util.h"
96} 18#include "map.h"
97 19
98static void ui_helpline__push(const char *msg) 20struct hist_browser {
99{ 21 struct ui_browser b;
100 newtPushHelpLine(msg); 22 struct hists *hists;
101} 23 struct hist_entry *he_selection;
24 struct map_symbol *selection;
25};
102 26
103static void ui_helpline__vpush(const char *fmt, va_list ap) 27static void hist_browser__refresh_dimensions(struct hist_browser *self)
104{ 28{
105 char *s; 29 /* 3 == +/- toggle symbol before actual hist_entry rendering */
106 30 self->b.width = 3 + (hists__sort_list_width(self->hists) +
107 if (vasprintf(&s, fmt, ap) < 0) 31 sizeof("[k]"));
108 vfprintf(stderr, fmt, ap);
109 else {
110 ui_helpline__push(s);
111 free(s);
112 }
113} 32}
114 33
115static void ui_helpline__fpush(const char *fmt, ...) 34static void hist_browser__reset(struct hist_browser *self)
116{ 35{
117 va_list ap; 36 self->b.nr_entries = self->hists->nr_entries;
118 37 hist_browser__refresh_dimensions(self);
119 va_start(ap, fmt); 38 ui_browser__reset_index(&self->b);
120 ui_helpline__vpush(fmt, ap);
121 va_end(ap);
122} 39}
123 40
124static void ui_helpline__puts(const char *msg) 41static char tree__folded_sign(bool unfolded)
125{ 42{
126 ui_helpline__pop(); 43 return unfolded ? '-' : '+';
127 ui_helpline__push(msg);
128} 44}
129 45
130static char browser__last_msg[1024]; 46static char map_symbol__folded(const struct map_symbol *self)
131
132int browser__show_help(const char *format, va_list ap)
133{ 47{
134 int ret; 48 return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
135 static int backlog;
136
137 ret = vsnprintf(browser__last_msg + backlog,
138 sizeof(browser__last_msg) - backlog, format, ap);
139 backlog += ret;
140
141 if (browser__last_msg[backlog - 1] == '\n') {
142 ui_helpline__puts(browser__last_msg);
143 newtRefresh();
144 backlog = 0;
145 }
146
147 return ret;
148} 49}
149 50
150static void newt_form__set_exit_keys(newtComponent self) 51static char hist_entry__folded(const struct hist_entry *self)
151{ 52{
152 newtFormAddHotKey(self, NEWT_KEY_LEFT); 53 return map_symbol__folded(&self->ms);
153 newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
154 newtFormAddHotKey(self, 'Q');
155 newtFormAddHotKey(self, 'q');
156 newtFormAddHotKey(self, CTRL('c'));
157} 54}
158 55
159static newtComponent newt_form__new(void) 56static char callchain_list__folded(const struct callchain_list *self)
160{ 57{
161 newtComponent self = newtForm(NULL, NULL, 0); 58 return map_symbol__folded(&self->ms);
162 if (self)
163 newt_form__set_exit_keys(self);
164 return self;
165} 59}
166 60
167static int popup_menu(int argc, char * const argv[]) 61static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
168{ 62{
169 struct newtExitStruct es; 63 int n = 0;
170 int i, rc = -1, max_len = 5; 64 struct rb_node *nd;
171 newtComponent listbox, form = newt_form__new();
172
173 if (form == NULL)
174 return -1;
175 65
176 listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT); 66 for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
177 if (listbox == NULL) 67 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
178 goto out_destroy_form; 68 struct callchain_list *chain;
69 char folded_sign = ' '; /* No children */
179 70
180 newtFormAddComponent(form, listbox); 71 list_for_each_entry(chain, &child->val, list) {
72 ++n;
73 /* We need this because we may not have children */
74 folded_sign = callchain_list__folded(chain);
75 if (folded_sign == '+')
76 break;
77 }
181 78
182 for (i = 0; i < argc; ++i) { 79 if (folded_sign == '-') /* Have children and they're unfolded */
183 int len = strlen(argv[i]); 80 n += callchain_node__count_rows_rb_tree(child);
184 if (len > max_len)
185 max_len = len;
186 if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
187 goto out_destroy_form;
188 } 81 }
189 82
190 newtCenteredWindow(max_len, argc, NULL); 83 return n;
191 newtFormRun(form, &es);
192 rc = newtListboxGetCurrent(listbox) - NULL;
193 if (es.reason == NEWT_EXIT_HOTKEY)
194 rc = -1;
195 newtPopWindow();
196out_destroy_form:
197 newtFormDestroy(form);
198 return rc;
199} 84}
200 85
201static int ui__help_window(const char *text) 86static int callchain_node__count_rows(struct callchain_node *node)
202{ 87{
203 struct newtExitStruct es; 88 struct callchain_list *chain;
204 newtComponent tb, form = newt_form__new(); 89 bool unfolded = false;
205 int rc = -1; 90 int n = 0;
206 int max_len = 0, nr_lines = 0;
207 const char *t;
208
209 if (form == NULL)
210 return -1;
211 91
212 t = text; 92 list_for_each_entry(chain, &node->val, list) {
213 while (1) { 93 ++n;
214 const char *sep = strchr(t, '\n'); 94 unfolded = chain->ms.unfolded;
215 int len;
216
217 if (sep == NULL)
218 sep = strchr(t, '\0');
219 len = sep - t;
220 if (max_len < len)
221 max_len = len;
222 ++nr_lines;
223 if (*sep == '\0')
224 break;
225 t = sep + 1;
226 } 95 }
227 96
228 tb = newtTextbox(0, 0, max_len, nr_lines, 0); 97 if (unfolded)
229 if (tb == NULL) 98 n += callchain_node__count_rows_rb_tree(node);
230 goto out_destroy_form;
231
232 newtTextboxSetText(tb, text);
233 newtFormAddComponent(form, tb);
234 newtCenteredWindow(max_len, nr_lines, NULL);
235 newtFormRun(form, &es);
236 newtPopWindow();
237 rc = 0;
238out_destroy_form:
239 newtFormDestroy(form);
240 return rc;
241}
242
243static bool dialog_yesno(const char *msg)
244{
245 /* newtWinChoice should really be accepting const char pointers... */
246 char yes[] = "Yes", no[] = "No";
247 return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
248}
249
250static void ui__error_window(const char *fmt, ...)
251{
252 va_list ap;
253
254 va_start(ap, fmt);
255 newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
256 va_end(ap);
257}
258
259#define HE_COLORSET_TOP 50
260#define HE_COLORSET_MEDIUM 51
261#define HE_COLORSET_NORMAL 52
262#define HE_COLORSET_SELECTED 53
263#define HE_COLORSET_CODE 54
264 99
265static int ui_browser__percent_color(double percent, bool current) 100 return n;
266{
267 if (current)
268 return HE_COLORSET_SELECTED;
269 if (percent >= MIN_RED)
270 return HE_COLORSET_TOP;
271 if (percent >= MIN_GREEN)
272 return HE_COLORSET_MEDIUM;
273 return HE_COLORSET_NORMAL;
274} 101}
275 102
276struct ui_browser { 103static int callchain__count_rows(struct rb_root *chain)
277 newtComponent form, sb;
278 u64 index, first_visible_entry_idx;
279 void *first_visible_entry, *entries;
280 u16 top, left, width, height;
281 void *priv;
282 unsigned int (*refresh_entries)(struct ui_browser *self);
283 void (*seek)(struct ui_browser *self,
284 off_t offset, int whence);
285 u32 nr_entries;
286};
287
288static void ui_browser__list_head_seek(struct ui_browser *self,
289 off_t offset, int whence)
290{ 104{
291 struct list_head *head = self->entries; 105 struct rb_node *nd;
292 struct list_head *pos; 106 int n = 0;
293
294 switch (whence) {
295 case SEEK_SET:
296 pos = head->next;
297 break;
298 case SEEK_CUR:
299 pos = self->first_visible_entry;
300 break;
301 case SEEK_END:
302 pos = head->prev;
303 break;
304 default:
305 return;
306 }
307 107
308 if (offset > 0) { 108 for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
309 while (offset-- != 0) 109 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
310 pos = pos->next; 110 n += callchain_node__count_rows(node);
311 } else {
312 while (offset++ != 0)
313 pos = pos->prev;
314 } 111 }
315 112
316 self->first_visible_entry = pos; 113 return n;
317}
318
319static bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
320{
321 return (self->first_visible_entry_idx + row) == self->index;
322} 114}
323 115
324static void ui_browser__refresh_dimensions(struct ui_browser *self) 116static bool map_symbol__toggle_fold(struct map_symbol *self)
325{ 117{
326 int cols, rows; 118 if (!self->has_children)
327 newtGetScreenSize(&cols, &rows); 119 return false;
328
329 if (self->width > cols - 4)
330 self->width = cols - 4;
331 self->height = rows - 5;
332 if (self->height > self->nr_entries)
333 self->height = self->nr_entries;
334 self->top = (rows - self->height) / 2;
335 self->left = (cols - self->width) / 2;
336}
337 120
338static void ui_browser__reset_index(struct ui_browser *self) 121 self->unfolded = !self->unfolded;
339{ 122 return true;
340 self->index = self->first_visible_entry_idx = 0;
341 self->seek(self, 0, SEEK_SET);
342} 123}
343 124
344static int ui_browser__show(struct ui_browser *self, const char *title) 125static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
345{ 126{
346 if (self->form != NULL) { 127 struct rb_node *nd = rb_first(&self->rb_root);
347 newtFormDestroy(self->form);
348 newtPopWindow();
349 }
350 ui_browser__refresh_dimensions(self);
351 newtCenteredWindow(self->width, self->height, title);
352 self->form = newt_form__new();
353 if (self->form == NULL)
354 return -1;
355
356 self->sb = newtVerticalScrollbar(self->width, 0, self->height,
357 HE_COLORSET_NORMAL,
358 HE_COLORSET_SELECTED);
359 if (self->sb == NULL)
360 return -1;
361 128
362 newtFormAddHotKey(self->form, NEWT_KEY_UP); 129 for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
363 newtFormAddHotKey(self->form, NEWT_KEY_DOWN); 130 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
364 newtFormAddHotKey(self->form, NEWT_KEY_PGUP); 131 struct callchain_list *chain;
365 newtFormAddHotKey(self->form, NEWT_KEY_PGDN); 132 int first = true;
366 newtFormAddHotKey(self->form, NEWT_KEY_HOME);
367 newtFormAddHotKey(self->form, NEWT_KEY_END);
368 newtFormAddComponent(self->form, self->sb);
369 return 0;
370}
371 133
372static int objdump_line__show(struct objdump_line *self, struct list_head *head, 134 list_for_each_entry(chain, &child->val, list) {
373 int width, struct hist_entry *he, int len, 135 if (first) {
374 bool current_entry) 136 first = false;
375{ 137 chain->ms.has_children = chain->list.next != &child->val ||
376 if (self->offset != -1) { 138 rb_first(&child->rb_root) != NULL;
377 struct symbol *sym = he->ms.sym;
378 unsigned int hits = 0;
379 double percent = 0.0;
380 int color;
381 struct sym_priv *priv = symbol__priv(sym);
382 struct sym_ext *sym_ext = priv->ext;
383 struct sym_hist *h = priv->hist;
384 s64 offset = self->offset;
385 struct objdump_line *next = objdump__get_next_ip_line(head, self);
386
387 while (offset < (s64)len &&
388 (next == NULL || offset < next->offset)) {
389 if (sym_ext) {
390 percent += sym_ext[offset].percent;
391 } else 139 } else
392 hits += h->ip[offset]; 140 chain->ms.has_children = chain->list.next == &child->val &&
393 141 rb_first(&child->rb_root) != NULL;
394 ++offset;
395 } 142 }
396 143
397 if (sym_ext == NULL && h->sum) 144 callchain_node__init_have_children_rb_tree(child);
398 percent = 100.0 * hits / h->sum;
399
400 color = ui_browser__percent_color(percent, current_entry);
401 SLsmg_set_color(color);
402 slsmg_printf(" %7.2f ", percent);
403 if (!current_entry)
404 SLsmg_set_color(HE_COLORSET_CODE);
405 } else {
406 int color = ui_browser__percent_color(0, current_entry);
407 SLsmg_set_color(color);
408 slsmg_write_nstring(" ", 9);
409 } 145 }
410
411 SLsmg_write_char(':');
412 slsmg_write_nstring(" ", 8);
413 if (!*self->line)
414 slsmg_write_nstring(" ", width - 18);
415 else
416 slsmg_write_nstring(self->line, width - 18);
417
418 return 0;
419} 146}
420 147
421static int ui_browser__refresh_entries(struct ui_browser *self) 148static void callchain_node__init_have_children(struct callchain_node *self)
422{ 149{
423 int row; 150 struct callchain_list *chain;
424 151
425 newtScrollbarSet(self->sb, self->index, self->nr_entries - 1); 152 list_for_each_entry(chain, &self->val, list)
426 row = self->refresh_entries(self); 153 chain->ms.has_children = rb_first(&self->rb_root) != NULL;
427 SLsmg_set_color(HE_COLORSET_NORMAL);
428 SLsmg_fill_region(self->top + row, self->left,
429 self->height - row, self->width, ' ');
430 154
431 return 0; 155 callchain_node__init_have_children_rb_tree(self);
432} 156}
433 157
434static int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es) 158static void callchain__init_have_children(struct rb_root *self)
435{ 159{
436 if (ui_browser__refresh_entries(self) < 0) 160 struct rb_node *nd;
437 return -1;
438
439 while (1) {
440 off_t offset;
441
442 newtFormRun(self->form, es);
443
444 if (es->reason != NEWT_EXIT_HOTKEY)
445 break;
446 if (is_exit_key(es->u.key))
447 return es->u.key;
448 switch (es->u.key) {
449 case NEWT_KEY_DOWN:
450 if (self->index == self->nr_entries - 1)
451 break;
452 ++self->index;
453 if (self->index == self->first_visible_entry_idx + self->height) {
454 ++self->first_visible_entry_idx;
455 self->seek(self, +1, SEEK_CUR);
456 }
457 break;
458 case NEWT_KEY_UP:
459 if (self->index == 0)
460 break;
461 --self->index;
462 if (self->index < self->first_visible_entry_idx) {
463 --self->first_visible_entry_idx;
464 self->seek(self, -1, SEEK_CUR);
465 }
466 break;
467 case NEWT_KEY_PGDN:
468 case ' ':
469 if (self->first_visible_entry_idx + self->height > self->nr_entries - 1)
470 break;
471
472 offset = self->height;
473 if (self->index + offset > self->nr_entries - 1)
474 offset = self->nr_entries - 1 - self->index;
475 self->index += offset;
476 self->first_visible_entry_idx += offset;
477 self->seek(self, +offset, SEEK_CUR);
478 break;
479 case NEWT_KEY_PGUP:
480 if (self->first_visible_entry_idx == 0)
481 break;
482
483 if (self->first_visible_entry_idx < self->height)
484 offset = self->first_visible_entry_idx;
485 else
486 offset = self->height;
487 161
488 self->index -= offset; 162 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
489 self->first_visible_entry_idx -= offset; 163 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
490 self->seek(self, -offset, SEEK_CUR); 164 callchain_node__init_have_children(node);
491 break;
492 case NEWT_KEY_HOME:
493 ui_browser__reset_index(self);
494 break;
495 case NEWT_KEY_END:
496 offset = self->height - 1;
497 if (offset >= self->nr_entries)
498 offset = self->nr_entries - 1;
499
500 self->index = self->nr_entries - 1;
501 self->first_visible_entry_idx = self->index - offset;
502 self->seek(self, -offset, SEEK_END);
503 break;
504 default:
505 return es->u.key;
506 }
507 if (ui_browser__refresh_entries(self) < 0)
508 return -1;
509 } 165 }
510 return 0;
511}
512
513static char *callchain_list__sym_name(struct callchain_list *self,
514 char *bf, size_t bfsize)
515{
516 if (self->ms.sym)
517 return self->ms.sym->name;
518
519 snprintf(bf, bfsize, "%#Lx", self->ip);
520 return bf;
521} 166}
522 167
523static unsigned int hist_entry__annotate_browser_refresh(struct ui_browser *self) 168static void hist_entry__init_have_children(struct hist_entry *self)
524{ 169{
525 struct objdump_line *pos; 170 if (!self->init_have_children) {
526 struct list_head *head = self->entries; 171 callchain__init_have_children(&self->sorted_chain);
527 struct hist_entry *he = self->priv; 172 self->init_have_children = true;
528 int row = 0;
529 int len = he->ms.sym->end - he->ms.sym->start;
530
531 if (self->first_visible_entry == NULL || self->first_visible_entry == self->entries)
532 self->first_visible_entry = head->next;
533
534 pos = list_entry(self->first_visible_entry, struct objdump_line, node);
535
536 list_for_each_entry_from(pos, head, node) {
537 bool current_entry = ui_browser__is_current_entry(self, row);
538 SLsmg_gotorc(self->top + row, self->left);
539 objdump_line__show(pos, head, self->width,
540 he, len, current_entry);
541 if (++row == self->height)
542 break;
543 } 173 }
544
545 return row;
546} 174}
547 175
548int hist_entry__tui_annotate(struct hist_entry *self) 176static bool hist_browser__toggle_fold(struct hist_browser *self)
549{ 177{
550 struct ui_browser browser; 178 if (map_symbol__toggle_fold(self->selection)) {
551 struct newtExitStruct es; 179 struct hist_entry *he = self->he_selection;
552 struct objdump_line *pos, *n;
553 LIST_HEAD(head);
554 int ret;
555
556 if (self->ms.sym == NULL)
557 return -1;
558 180
559 if (self->ms.map->dso->annotate_warned) 181 hist_entry__init_have_children(he);
560 return -1; 182 self->hists->nr_entries -= he->nr_rows;
561 183
562 if (hist_entry__annotate(self, &head) < 0) { 184 if (he->ms.unfolded)
563 ui__error_window(browser__last_msg); 185 he->nr_rows = callchain__count_rows(&he->sorted_chain);
564 return -1; 186 else
565 } 187 he->nr_rows = 0;
188 self->hists->nr_entries += he->nr_rows;
189 self->b.nr_entries = self->hists->nr_entries;
566 190
567 ui_helpline__push("Press <- or ESC to exit"); 191 return true;
568
569 memset(&browser, 0, sizeof(browser));
570 browser.entries = &head;
571 browser.refresh_entries = hist_entry__annotate_browser_refresh;
572 browser.seek = ui_browser__list_head_seek;
573 browser.priv = self;
574 list_for_each_entry(pos, &head, node) {
575 size_t line_len = strlen(pos->line);
576 if (browser.width < line_len)
577 browser.width = line_len;
578 ++browser.nr_entries;
579 } 192 }
580 193
581 browser.width += 18; /* Percentage */ 194 /* If it doesn't have children, no toggling performed */
582 ui_browser__show(&browser, self->ms.sym->name); 195 return false;
583 newtFormAddHotKey(browser.form, ' ');
584 ret = ui_browser__run(&browser, &es);
585 newtFormDestroy(browser.form);
586 newtPopWindow();
587 list_for_each_entry_safe(pos, n, &head, node) {
588 list_del(&pos->node);
589 objdump_line__free(pos);
590 }
591 ui_helpline__pop();
592 return ret;
593} 196}
594 197
595struct hist_browser {
596 struct ui_browser b;
597 struct hists *hists;
598 struct hist_entry *he_selection;
599 struct map_symbol *selection;
600};
601
602static void hist_browser__reset(struct hist_browser *self);
603static int hist_browser__run(struct hist_browser *self, const char *title, 198static int hist_browser__run(struct hist_browser *self, const char *title,
604 struct newtExitStruct *es); 199 struct newtExitStruct *es)
605static unsigned int hist_browser__refresh_entries(struct ui_browser *self);
606static void ui_browser__hists_seek(struct ui_browser *self,
607 off_t offset, int whence);
608
609static struct hist_browser *hist_browser__new(struct hists *hists)
610{
611 struct hist_browser *self = zalloc(sizeof(*self));
612
613 if (self) {
614 self->hists = hists;
615 self->b.refresh_entries = hist_browser__refresh_entries;
616 self->b.seek = ui_browser__hists_seek;
617 }
618
619 return self;
620}
621
622static void hist_browser__delete(struct hist_browser *self)
623{
624 newtFormDestroy(self->b.form);
625 newtPopWindow();
626 free(self);
627}
628
629static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
630{
631 return self->he_selection;
632}
633
634static struct thread *hist_browser__selected_thread(struct hist_browser *self)
635{ 200{
636 return self->he_selection->thread; 201 char str[256], unit;
637} 202 unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE];
638 203
639static int hist_browser__title(char *bf, size_t size, const char *ev_name, 204 self->b.entries = &self->hists->entries;
640 const struct dso *dso, const struct thread *thread) 205 self->b.nr_entries = self->hists->nr_entries;
641{
642 int printed = 0;
643 206
644 if (thread) 207 hist_browser__refresh_dimensions(self);
645 printed += snprintf(bf + printed, size - printed,
646 "Thread: %s(%d)",
647 (thread->comm_set ? thread->comm : ""),
648 thread->pid);
649 if (dso)
650 printed += snprintf(bf + printed, size - printed,
651 "%sDSO: %s", thread ? " " : "",
652 dso->short_name);
653 return printed ?: snprintf(bf, size, "Event: %s", ev_name);
654}
655 208
656int hists__browse(struct hists *self, const char *helpline, const char *ev_name) 209 nr_events = convert_unit(nr_events, &unit);
657{ 210 snprintf(str, sizeof(str), "Events: %lu%c ",
658 struct hist_browser *browser = hist_browser__new(self); 211 nr_events, unit);
659 struct pstack *fstack; 212 newtDrawRootText(0, 0, str);
660 const struct thread *thread_filter = NULL;
661 const struct dso *dso_filter = NULL;
662 struct newtExitStruct es;
663 char msg[160];
664 int key = -1;
665 213
666 if (browser == NULL) 214 if (ui_browser__show(&self->b, title,
215 "Press '?' for help on key bindings") < 0)
667 return -1; 216 return -1;
668 217
669 fstack = pstack__new(2); 218 newtFormAddHotKey(self->b.form, 'a');
670 if (fstack == NULL) 219 newtFormAddHotKey(self->b.form, '?');
671 goto out; 220 newtFormAddHotKey(self->b.form, 'h');
672 221 newtFormAddHotKey(self->b.form, 'd');
673 ui_helpline__push(helpline); 222 newtFormAddHotKey(self->b.form, 'D');
223 newtFormAddHotKey(self->b.form, 't');
674 224
675 hist_browser__title(msg, sizeof(msg), ev_name, 225 newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
676 dso_filter, thread_filter); 226 newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
227 newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
677 228
678 while (1) { 229 while (1) {
679 const struct thread *thread; 230 ui_browser__run(&self->b, es);
680 const struct dso *dso;
681 char *options[16];
682 int nr_options = 0, choice = 0, i,
683 annotate = -2, zoom_dso = -2, zoom_thread = -2;
684 231
685 if (hist_browser__run(browser, msg, &es)) 232 if (es->reason != NEWT_EXIT_HOTKEY)
686 break; 233 break;
687 234 switch (es->u.key) {
688 thread = hist_browser__selected_thread(browser); 235 case 'D': { /* Debug */
689 dso = browser->selection->map ? browser->selection->map->dso : NULL; 236 static int seq;
690 237 struct hist_entry *h = rb_entry(self->b.top,
691 if (es.reason == NEWT_EXIT_HOTKEY) { 238 struct hist_entry, rb_node);
692 key = es.u.key; 239 ui_helpline__pop();
693 240 ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
694 switch (key) { 241 seq++, self->b.nr_entries,
695 case NEWT_KEY_F1: 242 self->hists->nr_entries,
696 goto do_help; 243 self->b.height,
697 case NEWT_KEY_TAB: 244 self->b.index,
698 case NEWT_KEY_UNTAB: 245 self->b.top_idx,
699 /* 246 h->row_offset, h->nr_rows);
700 * Exit the browser, let hists__browser_tree
701 * go to the next or previous
702 */
703 goto out_free_stack;
704 default:;
705 }
706
707 key = toupper(key);
708 switch (key) {
709 case 'A':
710 if (browser->selection->map == NULL &&
711 browser->selection->map->dso->annotate_warned)
712 continue;
713 goto do_annotate;
714 case 'D':
715 goto zoom_dso;
716 case 'T':
717 goto zoom_thread;
718 case 'H':
719 case '?':
720do_help:
721 ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
722 "<- Zoom out\n"
723 "a Annotate current symbol\n"
724 "h/?/F1 Show this window\n"
725 "d Zoom into current DSO\n"
726 "t Zoom into current Thread\n"
727 "q/CTRL+C Exit browser");
728 continue;
729 default:;
730 }
731 if (is_exit_key(key)) {
732 if (key == NEWT_KEY_ESCAPE) {
733 if (dialog_yesno("Do you really want to exit?"))
734 break;
735 else
736 continue;
737 } else
738 break;
739 }
740
741 if (es.u.key == NEWT_KEY_LEFT) {
742 const void *top;
743
744 if (pstack__empty(fstack))
745 continue;
746 top = pstack__pop(fstack);
747 if (top == &dso_filter)
748 goto zoom_out_dso;
749 if (top == &thread_filter)
750 goto zoom_out_thread;
751 continue;
752 }
753 } 247 }
754
755 if (browser->selection->sym != NULL &&
756 !browser->selection->map->dso->annotate_warned &&
757 asprintf(&options[nr_options], "Annotate %s",
758 browser->selection->sym->name) > 0)
759 annotate = nr_options++;
760
761 if (thread != NULL &&
762 asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
763 (thread_filter ? "out of" : "into"),
764 (thread->comm_set ? thread->comm : ""),
765 thread->pid) > 0)
766 zoom_thread = nr_options++;
767
768 if (dso != NULL &&
769 asprintf(&options[nr_options], "Zoom %s %s DSO",
770 (dso_filter ? "out of" : "into"),
771 (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
772 zoom_dso = nr_options++;
773
774 options[nr_options++] = (char *)"Exit";
775
776 choice = popup_menu(nr_options, options);
777
778 for (i = 0; i < nr_options - 1; ++i)
779 free(options[i]);
780
781 if (choice == nr_options - 1)
782 break;
783
784 if (choice == -1)
785 continue; 248 continue;
786 249 case NEWT_KEY_ENTER:
787 if (choice == annotate) { 250 if (hist_browser__toggle_fold(self))
788 struct hist_entry *he; 251 break;
789do_annotate: 252 /* fall thru */
790 if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
791 browser->selection->map->dso->annotate_warned = 1;
792 ui_helpline__puts("No vmlinux file found, can't "
793 "annotate with just a "
794 "kallsyms file");
795 continue;
796 }
797
798 he = hist_browser__selected_entry(browser);
799 if (he == NULL)
800 continue;
801
802 hist_entry__tui_annotate(he);
803 } else if (choice == zoom_dso) {
804zoom_dso:
805 if (dso_filter) {
806 pstack__remove(fstack, &dso_filter);
807zoom_out_dso:
808 ui_helpline__pop();
809 dso_filter = NULL;
810 } else {
811 if (dso == NULL)
812 continue;
813 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
814 dso->kernel ? "the Kernel" : dso->short_name);
815 dso_filter = dso;
816 pstack__push(fstack, &dso_filter);
817 }
818 hists__filter_by_dso(self, dso_filter);
819 hist_browser__title(msg, sizeof(msg), ev_name,
820 dso_filter, thread_filter);
821 hist_browser__reset(browser);
822 } else if (choice == zoom_thread) {
823zoom_thread:
824 if (thread_filter) {
825 pstack__remove(fstack, &thread_filter);
826zoom_out_thread:
827 ui_helpline__pop();
828 thread_filter = NULL;
829 } else {
830 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
831 thread->comm_set ? thread->comm : "",
832 thread->pid);
833 thread_filter = thread;
834 pstack__push(fstack, &thread_filter);
835 }
836 hists__filter_by_thread(self, thread_filter);
837 hist_browser__title(msg, sizeof(msg), ev_name,
838 dso_filter, thread_filter);
839 hist_browser__reset(browser);
840 }
841 }
842out_free_stack:
843 pstack__delete(fstack);
844out:
845 hist_browser__delete(browser);
846 return key;
847}
848
849int hists__tui_browse_tree(struct rb_root *self, const char *help)
850{
851 struct rb_node *first = rb_first(self), *nd = first, *next;
852 int key = 0;
853
854 while (nd) {
855 struct hists *hists = rb_entry(nd, struct hists, rb_node);
856 const char *ev_name = __event_name(hists->type, hists->config);
857
858 key = hists__browse(hists, help, ev_name);
859
860 if (is_exit_key(key))
861 break;
862
863 switch (key) {
864 case NEWT_KEY_TAB:
865 next = rb_next(nd);
866 if (next)
867 nd = next;
868 break;
869 case NEWT_KEY_UNTAB:
870 if (nd == first)
871 continue;
872 nd = rb_prev(nd);
873 default: 253 default:
874 break; 254 return 0;
875 }
876 }
877
878 return key;
879}
880
881static struct newtPercentTreeColors {
882 const char *topColorFg, *topColorBg;
883 const char *mediumColorFg, *mediumColorBg;
884 const char *normalColorFg, *normalColorBg;
885 const char *selColorFg, *selColorBg;
886 const char *codeColorFg, *codeColorBg;
887} defaultPercentTreeColors = {
888 "red", "lightgray",
889 "green", "lightgray",
890 "black", "lightgray",
891 "lightgray", "magenta",
892 "blue", "lightgray",
893};
894
895static void newt_suspend(void *d __used)
896{
897 newtSuspend();
898 raise(SIGTSTP);
899 newtResume();
900}
901
902void setup_browser(void)
903{
904 struct newtPercentTreeColors *c = &defaultPercentTreeColors;
905
906 if (!isatty(1) || !use_browser || dump_trace) {
907 use_browser = 0;
908 setup_pager();
909 return;
910 }
911
912 use_browser = 1;
913 newtInit();
914 newtCls();
915 newtSetSuspendCallback(newt_suspend, NULL);
916 ui_helpline__puts(" ");
917 sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg);
918 sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg);
919 sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg);
920 sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg);
921 sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg);
922}
923
924void exit_browser(bool wait_for_ok)
925{
926 if (use_browser > 0) {
927 if (wait_for_ok) {
928 char title[] = "Fatal Error", ok[] = "Ok";
929 newtWinMessage(title, ok, browser__last_msg);
930 } 255 }
931 newtFinished();
932 } 256 }
933}
934
935static void hist_browser__refresh_dimensions(struct hist_browser *self)
936{
937 /* 3 == +/- toggle symbol before actual hist_entry rendering */
938 self->b.width = 3 + (hists__sort_list_width(self->hists) +
939 sizeof("[k]"));
940}
941
942static void hist_browser__reset(struct hist_browser *self)
943{
944 self->b.nr_entries = self->hists->nr_entries;
945 hist_browser__refresh_dimensions(self);
946 ui_browser__reset_index(&self->b);
947}
948
949static char tree__folded_sign(bool unfolded)
950{
951 return unfolded ? '-' : '+';
952}
953
954static char map_symbol__folded(const struct map_symbol *self)
955{
956 return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
957}
958
959static char hist_entry__folded(const struct hist_entry *self)
960{
961 return map_symbol__folded(&self->ms);
962}
963 257
964static char callchain_list__folded(const struct callchain_list *self) 258 ui_browser__hide(&self->b);
965{ 259 return 0;
966 return map_symbol__folded(&self->ms);
967} 260}
968 261
969static bool map_symbol__toggle_fold(struct map_symbol *self) 262static char *callchain_list__sym_name(struct callchain_list *self,
263 char *bf, size_t bfsize)
970{ 264{
971 if (!self->has_children) 265 if (self->ms.sym)
972 return false; 266 return self->ms.sym->name;
973 267
974 self->unfolded = !self->unfolded; 268 snprintf(bf, bfsize, "%#Lx", self->ip);
975 return true; 269 return bf;
976} 270}
977 271
978#define LEVEL_OFFSET_STEP 3 272#define LEVEL_OFFSET_STEP 3
@@ -1048,7 +342,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
1048 } 342 }
1049 343
1050 SLsmg_set_color(color); 344 SLsmg_set_color(color);
1051 SLsmg_gotorc(self->b.top + row, self->b.left); 345 SLsmg_gotorc(self->b.y + row, self->b.x);
1052 slsmg_write_nstring(" ", offset + extra_offset); 346 slsmg_write_nstring(" ", offset + extra_offset);
1053 slsmg_printf("%c ", folded_sign); 347 slsmg_printf("%c ", folded_sign);
1054 slsmg_write_nstring(str, width); 348 slsmg_write_nstring(str, width);
@@ -1111,7 +405,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *self,
1111 } 405 }
1112 406
1113 s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); 407 s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
1114 SLsmg_gotorc(self->b.top + row, self->b.left); 408 SLsmg_gotorc(self->b.y + row, self->b.x);
1115 SLsmg_set_color(color); 409 SLsmg_set_color(color);
1116 slsmg_write_nstring(" ", offset); 410 slsmg_write_nstring(" ", offset);
1117 slsmg_printf("%c ", folded_sign); 411 slsmg_printf("%c ", folded_sign);
@@ -1191,7 +485,7 @@ static int hist_browser__show_entry(struct hist_browser *self,
1191 } 485 }
1192 486
1193 SLsmg_set_color(color); 487 SLsmg_set_color(color);
1194 SLsmg_gotorc(self->b.top + row, self->b.left); 488 SLsmg_gotorc(self->b.y + row, self->b.x);
1195 if (symbol_conf.use_callchain) { 489 if (symbol_conf.use_callchain) {
1196 slsmg_printf("%c ", folded_sign); 490 slsmg_printf("%c ", folded_sign);
1197 width -= 2; 491 width -= 2;
@@ -1213,16 +507,16 @@ static int hist_browser__show_entry(struct hist_browser *self,
1213 return printed; 507 return printed;
1214} 508}
1215 509
1216static unsigned int hist_browser__refresh_entries(struct ui_browser *self) 510static unsigned int hist_browser__refresh(struct ui_browser *self)
1217{ 511{
1218 unsigned row = 0; 512 unsigned row = 0;
1219 struct rb_node *nd; 513 struct rb_node *nd;
1220 struct hist_browser *hb = container_of(self, struct hist_browser, b); 514 struct hist_browser *hb = container_of(self, struct hist_browser, b);
1221 515
1222 if (self->first_visible_entry == NULL) 516 if (self->top == NULL)
1223 self->first_visible_entry = rb_first(&hb->hists->entries); 517 self->top = rb_first(&hb->hists->entries);
1224 518
1225 for (nd = self->first_visible_entry; nd; nd = rb_next(nd)) { 519 for (nd = self->top; nd; nd = rb_next(nd)) {
1226 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 520 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1227 521
1228 if (h->filtered) 522 if (h->filtered)
@@ -1236,57 +530,6 @@ static unsigned int hist_browser__refresh_entries(struct ui_browser *self)
1236 return row; 530 return row;
1237} 531}
1238 532
1239static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
1240{
1241 struct rb_node *nd = rb_first(&self->rb_root);
1242
1243 for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
1244 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
1245 struct callchain_list *chain;
1246 int first = true;
1247
1248 list_for_each_entry(chain, &child->val, list) {
1249 if (first) {
1250 first = false;
1251 chain->ms.has_children = chain->list.next != &child->val ||
1252 rb_first(&child->rb_root) != NULL;
1253 } else
1254 chain->ms.has_children = chain->list.next == &child->val &&
1255 rb_first(&child->rb_root) != NULL;
1256 }
1257
1258 callchain_node__init_have_children_rb_tree(child);
1259 }
1260}
1261
1262static void callchain_node__init_have_children(struct callchain_node *self)
1263{
1264 struct callchain_list *chain;
1265
1266 list_for_each_entry(chain, &self->val, list)
1267 chain->ms.has_children = rb_first(&self->rb_root) != NULL;
1268
1269 callchain_node__init_have_children_rb_tree(self);
1270}
1271
1272static void callchain__init_have_children(struct rb_root *self)
1273{
1274 struct rb_node *nd;
1275
1276 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
1277 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
1278 callchain_node__init_have_children(node);
1279 }
1280}
1281
1282static void hist_entry__init_have_children(struct hist_entry *self)
1283{
1284 if (!self->init_have_children) {
1285 callchain__init_have_children(&self->sorted_chain);
1286 self->init_have_children = true;
1287 }
1288}
1289
1290static struct rb_node *hists__filter_entries(struct rb_node *nd) 533static struct rb_node *hists__filter_entries(struct rb_node *nd)
1291{ 534{
1292 while (nd != NULL) { 535 while (nd != NULL) {
@@ -1325,7 +568,7 @@ static void ui_browser__hists_seek(struct ui_browser *self,
1325 nd = hists__filter_entries(rb_first(self->entries)); 568 nd = hists__filter_entries(rb_first(self->entries));
1326 break; 569 break;
1327 case SEEK_CUR: 570 case SEEK_CUR:
1328 nd = self->first_visible_entry; 571 nd = self->top;
1329 goto do_offset; 572 goto do_offset;
1330 case SEEK_END: 573 case SEEK_END:
1331 nd = hists__filter_prev_entries(rb_last(self->entries)); 574 nd = hists__filter_prev_entries(rb_last(self->entries));
@@ -1339,7 +582,7 @@ static void ui_browser__hists_seek(struct ui_browser *self,
1339 * Moves not relative to the first visible entry invalidates its 582 * Moves not relative to the first visible entry invalidates its
1340 * row_offset: 583 * row_offset:
1341 */ 584 */
1342 h = rb_entry(self->first_visible_entry, struct hist_entry, rb_node); 585 h = rb_entry(self->top, struct hist_entry, rb_node);
1343 h->row_offset = 0; 586 h->row_offset = 0;
1344 587
1345 /* 588 /*
@@ -1367,7 +610,7 @@ do_offset:
1367 } else { 610 } else {
1368 h->row_offset += offset; 611 h->row_offset += offset;
1369 offset = 0; 612 offset = 0;
1370 self->first_visible_entry = nd; 613 self->top = nd;
1371 break; 614 break;
1372 } 615 }
1373 } 616 }
@@ -1375,7 +618,7 @@ do_offset:
1375 if (nd == NULL) 618 if (nd == NULL)
1376 break; 619 break;
1377 --offset; 620 --offset;
1378 self->first_visible_entry = nd; 621 self->top = nd;
1379 } while (offset != 0); 622 } while (offset != 0);
1380 } else if (offset < 0) { 623 } else if (offset < 0) {
1381 while (1) { 624 while (1) {
@@ -1388,7 +631,7 @@ do_offset:
1388 } else { 631 } else {
1389 h->row_offset += offset; 632 h->row_offset += offset;
1390 offset = 0; 633 offset = 0;
1391 self->first_visible_entry = nd; 634 self->top = nd;
1392 break; 635 break;
1393 } 636 }
1394 } else { 637 } else {
@@ -1398,7 +641,7 @@ do_offset:
1398 } else { 641 } else {
1399 h->row_offset = h->nr_rows + offset; 642 h->row_offset = h->nr_rows + offset;
1400 offset = 0; 643 offset = 0;
1401 self->first_visible_entry = nd; 644 self->top = nd;
1402 break; 645 break;
1403 } 646 }
1404 } 647 }
@@ -1408,7 +651,7 @@ do_offset:
1408 if (nd == NULL) 651 if (nd == NULL)
1409 break; 652 break;
1410 ++offset; 653 ++offset;
1411 self->first_visible_entry = nd; 654 self->top = nd;
1412 if (offset == 0) { 655 if (offset == 0) {
1413 /* 656 /*
1414 * Last unfiltered hist_entry, check if it is 657 * Last unfiltered hist_entry, check if it is
@@ -1423,146 +666,283 @@ do_offset:
1423 first = false; 666 first = false;
1424 } 667 }
1425 } else { 668 } else {
1426 self->first_visible_entry = nd; 669 self->top = nd;
1427 h = rb_entry(nd, struct hist_entry, rb_node); 670 h = rb_entry(nd, struct hist_entry, rb_node);
1428 h->row_offset = 0; 671 h->row_offset = 0;
1429 } 672 }
1430} 673}
1431 674
1432static int callchain_node__count_rows_rb_tree(struct callchain_node *self) 675static struct hist_browser *hist_browser__new(struct hists *hists)
1433{ 676{
1434 int n = 0; 677 struct hist_browser *self = zalloc(sizeof(*self));
1435 struct rb_node *nd;
1436
1437 for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
1438 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
1439 struct callchain_list *chain;
1440 char folded_sign = ' '; /* No children */
1441
1442 list_for_each_entry(chain, &child->val, list) {
1443 ++n;
1444 /* We need this because we may not have children */
1445 folded_sign = callchain_list__folded(chain);
1446 if (folded_sign == '+')
1447 break;
1448 }
1449 678
1450 if (folded_sign == '-') /* Have children and they're unfolded */ 679 if (self) {
1451 n += callchain_node__count_rows_rb_tree(child); 680 self->hists = hists;
681 self->b.refresh = hist_browser__refresh;
682 self->b.seek = ui_browser__hists_seek;
1452 } 683 }
1453 684
1454 return n; 685 return self;
1455} 686}
1456 687
1457static int callchain_node__count_rows(struct callchain_node *node) 688static void hist_browser__delete(struct hist_browser *self)
1458{ 689{
1459 struct callchain_list *chain; 690 newtFormDestroy(self->b.form);
1460 bool unfolded = false; 691 newtPopWindow();
1461 int n = 0; 692 free(self);
1462 693}
1463 list_for_each_entry(chain, &node->val, list) {
1464 ++n;
1465 unfolded = chain->ms.unfolded;
1466 }
1467
1468 if (unfolded)
1469 n += callchain_node__count_rows_rb_tree(node);
1470 694
1471 return n; 695static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
696{
697 return self->he_selection;
1472} 698}
1473 699
1474static int callchain__count_rows(struct rb_root *chain) 700static struct thread *hist_browser__selected_thread(struct hist_browser *self)
1475{ 701{
1476 struct rb_node *nd; 702 return self->he_selection->thread;
1477 int n = 0; 703}
1478 704
1479 for (nd = rb_first(chain); nd; nd = rb_next(nd)) { 705static int hist_browser__title(char *bf, size_t size, const char *ev_name,
1480 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); 706 const struct dso *dso, const struct thread *thread)
1481 n += callchain_node__count_rows(node); 707{
1482 } 708 int printed = 0;
1483 709
1484 return n; 710 if (thread)
711 printed += snprintf(bf + printed, size - printed,
712 "Thread: %s(%d)",
713 (thread->comm_set ? thread->comm : ""),
714 thread->pid);
715 if (dso)
716 printed += snprintf(bf + printed, size - printed,
717 "%sDSO: %s", thread ? " " : "",
718 dso->short_name);
719 return printed ?: snprintf(bf, size, "Event: %s", ev_name);
1485} 720}
1486 721
1487static bool hist_browser__toggle_fold(struct hist_browser *self) 722int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
1488{ 723{
1489 if (map_symbol__toggle_fold(self->selection)) { 724 struct hist_browser *browser = hist_browser__new(self);
1490 struct hist_entry *he = self->he_selection; 725 struct pstack *fstack;
726 const struct thread *thread_filter = NULL;
727 const struct dso *dso_filter = NULL;
728 struct newtExitStruct es;
729 char msg[160];
730 int key = -1;
1491 731
1492 hist_entry__init_have_children(he); 732 if (browser == NULL)
1493 self->hists->nr_entries -= he->nr_rows; 733 return -1;
1494 734
1495 if (he->ms.unfolded) 735 fstack = pstack__new(2);
1496 he->nr_rows = callchain__count_rows(&he->sorted_chain); 736 if (fstack == NULL)
1497 else 737 goto out;
1498 he->nr_rows = 0;
1499 self->hists->nr_entries += he->nr_rows;
1500 self->b.nr_entries = self->hists->nr_entries;
1501 738
1502 return true; 739 ui_helpline__push(helpline);
1503 }
1504 740
1505 /* If it doesn't have children, no toggling performed */ 741 hist_browser__title(msg, sizeof(msg), ev_name,
1506 return false; 742 dso_filter, thread_filter);
1507}
1508 743
1509static int hist_browser__run(struct hist_browser *self, const char *title, 744 while (1) {
1510 struct newtExitStruct *es) 745 const struct thread *thread;
1511{ 746 const struct dso *dso;
1512 char str[256], unit; 747 char *options[16];
1513 unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE]; 748 int nr_options = 0, choice = 0, i,
749 annotate = -2, zoom_dso = -2, zoom_thread = -2,
750 browse_map = -2;
1514 751
1515 self->b.entries = &self->hists->entries; 752 if (hist_browser__run(browser, msg, &es))
1516 self->b.nr_entries = self->hists->nr_entries; 753 break;
1517 754
1518 hist_browser__refresh_dimensions(self); 755 thread = hist_browser__selected_thread(browser);
756 dso = browser->selection->map ? browser->selection->map->dso : NULL;
1519 757
1520 nr_events = convert_unit(nr_events, &unit); 758 if (es.reason == NEWT_EXIT_HOTKEY) {
1521 snprintf(str, sizeof(str), "Events: %lu%c ", 759 key = es.u.key;
1522 nr_events, unit);
1523 newtDrawRootText(0, 0, str);
1524 760
1525 if (ui_browser__show(&self->b, title) < 0) 761 switch (key) {
1526 return -1; 762 case NEWT_KEY_F1:
763 goto do_help;
764 case NEWT_KEY_TAB:
765 case NEWT_KEY_UNTAB:
766 /*
767 * Exit the browser, let hists__browser_tree
768 * go to the next or previous
769 */
770 goto out_free_stack;
771 default:;
772 }
1527 773
1528 newtFormAddHotKey(self->b.form, 'A'); 774 switch (key) {
1529 newtFormAddHotKey(self->b.form, 'a'); 775 case 'a':
1530 newtFormAddHotKey(self->b.form, '?'); 776 if (browser->selection->map == NULL &&
1531 newtFormAddHotKey(self->b.form, 'h'); 777 browser->selection->map->dso->annotate_warned)
1532 newtFormAddHotKey(self->b.form, 'H'); 778 continue;
1533 newtFormAddHotKey(self->b.form, 'd'); 779 goto do_annotate;
780 case 'd':
781 goto zoom_dso;
782 case 't':
783 goto zoom_thread;
784 case 'h':
785 case '?':
786do_help:
787 ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
788 "<- Zoom out\n"
789 "a Annotate current symbol\n"
790 "h/?/F1 Show this window\n"
791 "d Zoom into current DSO\n"
792 "t Zoom into current Thread\n"
793 "q/CTRL+C Exit browser");
794 continue;
795 default:;
796 }
797 if (is_exit_key(key)) {
798 if (key == NEWT_KEY_ESCAPE &&
799 !ui__dialog_yesno("Do you really want to exit?"))
800 continue;
801 break;
802 }
1534 803
1535 newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT); 804 if (es.u.key == NEWT_KEY_LEFT) {
1536 newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT); 805 const void *top;
1537 newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
1538 806
1539 while (1) { 807 if (pstack__empty(fstack))
1540 ui_browser__run(&self->b, es); 808 continue;
809 top = pstack__pop(fstack);
810 if (top == &dso_filter)
811 goto zoom_out_dso;
812 if (top == &thread_filter)
813 goto zoom_out_thread;
814 continue;
815 }
816 }
1541 817
1542 if (es->reason != NEWT_EXIT_HOTKEY) 818 if (browser->selection->sym != NULL &&
819 !browser->selection->map->dso->annotate_warned &&
820 asprintf(&options[nr_options], "Annotate %s",
821 browser->selection->sym->name) > 0)
822 annotate = nr_options++;
823
824 if (thread != NULL &&
825 asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
826 (thread_filter ? "out of" : "into"),
827 (thread->comm_set ? thread->comm : ""),
828 thread->pid) > 0)
829 zoom_thread = nr_options++;
830
831 if (dso != NULL &&
832 asprintf(&options[nr_options], "Zoom %s %s DSO",
833 (dso_filter ? "out of" : "into"),
834 (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
835 zoom_dso = nr_options++;
836
837 if (browser->selection->map != NULL &&
838 asprintf(&options[nr_options], "Browse map details") > 0)
839 browse_map = nr_options++;
840
841 options[nr_options++] = (char *)"Exit";
842
843 choice = ui__popup_menu(nr_options, options);
844
845 for (i = 0; i < nr_options - 1; ++i)
846 free(options[i]);
847
848 if (choice == nr_options - 1)
1543 break; 849 break;
1544 switch (es->u.key) { 850
1545 case 'd': { /* Debug */ 851 if (choice == -1)
1546 static int seq;
1547 struct hist_entry *h = rb_entry(self->b.first_visible_entry,
1548 struct hist_entry, rb_node);
1549 ui_helpline__pop();
1550 ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
1551 seq++, self->b.nr_entries,
1552 self->hists->nr_entries,
1553 self->b.height,
1554 self->b.index,
1555 self->b.first_visible_entry_idx,
1556 h->row_offset, h->nr_rows);
1557 }
1558 continue; 852 continue;
1559 case NEWT_KEY_ENTER: 853
1560 if (hist_browser__toggle_fold(self)) 854 if (choice == annotate) {
1561 break; 855 struct hist_entry *he;
1562 /* fall thru */ 856do_annotate:
857 if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
858 browser->selection->map->dso->annotate_warned = 1;
859 ui_helpline__puts("No vmlinux file found, can't "
860 "annotate with just a "
861 "kallsyms file");
862 continue;
863 }
864
865 he = hist_browser__selected_entry(browser);
866 if (he == NULL)
867 continue;
868
869 hist_entry__tui_annotate(he);
870 } else if (choice == browse_map)
871 map__browse(browser->selection->map);
872 else if (choice == zoom_dso) {
873zoom_dso:
874 if (dso_filter) {
875 pstack__remove(fstack, &dso_filter);
876zoom_out_dso:
877 ui_helpline__pop();
878 dso_filter = NULL;
879 } else {
880 if (dso == NULL)
881 continue;
882 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
883 dso->kernel ? "the Kernel" : dso->short_name);
884 dso_filter = dso;
885 pstack__push(fstack, &dso_filter);
886 }
887 hists__filter_by_dso(self, dso_filter);
888 hist_browser__title(msg, sizeof(msg), ev_name,
889 dso_filter, thread_filter);
890 hist_browser__reset(browser);
891 } else if (choice == zoom_thread) {
892zoom_thread:
893 if (thread_filter) {
894 pstack__remove(fstack, &thread_filter);
895zoom_out_thread:
896 ui_helpline__pop();
897 thread_filter = NULL;
898 } else {
899 ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
900 thread->comm_set ? thread->comm : "",
901 thread->pid);
902 thread_filter = thread;
903 pstack__push(fstack, &thread_filter);
904 }
905 hists__filter_by_thread(self, thread_filter);
906 hist_browser__title(msg, sizeof(msg), ev_name,
907 dso_filter, thread_filter);
908 hist_browser__reset(browser);
909 }
910 }
911out_free_stack:
912 pstack__delete(fstack);
913out:
914 hist_browser__delete(browser);
915 return key;
916}
917
918int hists__tui_browse_tree(struct rb_root *self, const char *help)
919{
920 struct rb_node *first = rb_first(self), *nd = first, *next;
921 int key = 0;
922
923 while (nd) {
924 struct hists *hists = rb_entry(nd, struct hists, rb_node);
925 const char *ev_name = __event_name(hists->type, hists->config);
926
927 key = hists__browse(hists, help, ev_name);
928
929 if (is_exit_key(key))
930 break;
931
932 switch (key) {
933 case NEWT_KEY_TAB:
934 next = rb_next(nd);
935 if (next)
936 nd = next;
937 break;
938 case NEWT_KEY_UNTAB:
939 if (nd == first)
940 continue;
941 nd = rb_prev(nd);
1563 default: 942 default:
1564 return 0; 943 break;
1565 } 944 }
1566 } 945 }
1567 return 0; 946
947 return key;
1568} 948}
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
new file mode 100644
index 000000000000..142b825b42bf
--- /dev/null
+++ b/tools/perf/util/ui/browsers/map.c
@@ -0,0 +1,161 @@
1#include "../libslang.h"
2#include <elf.h>
3#include <newt.h>
4#include <sys/ttydefaults.h>
5#include <ctype.h>
6#include <string.h>
7#include <linux/bitops.h>
8#include "../../debug.h"
9#include "../../symbol.h"
10#include "../browser.h"
11#include "../helpline.h"
12#include "map.h"
13
14static int ui_entry__read(const char *title, char *bf, size_t size, int width)
15{
16 struct newtExitStruct es;
17 newtComponent form, entry;
18 const char *result;
19 int err = -1;
20
21 newtCenteredWindow(width, 1, title);
22 form = newtForm(NULL, NULL, 0);
23 if (form == NULL)
24 return -1;
25
26 entry = newtEntry(0, 0, "0x", width, &result, NEWT_FLAG_SCROLL);
27 if (entry == NULL)
28 goto out_free_form;
29
30 newtFormAddComponent(form, entry);
31 newtFormAddHotKey(form, NEWT_KEY_ENTER);
32 newtFormAddHotKey(form, NEWT_KEY_ESCAPE);
33 newtFormAddHotKey(form, NEWT_KEY_LEFT);
34 newtFormAddHotKey(form, CTRL('c'));
35 newtFormRun(form, &es);
36
37 if (result != NULL) {
38 strncpy(bf, result, size);
39 err = 0;
40 }
41out_free_form:
42 newtPopWindow();
43 newtFormDestroy(form);
44 return 0;
45}
46
/* Symbol browser state for a single map */
struct map_browser {
	struct ui_browser b;	/* embedded base: container_of() in callbacks */
	struct map *map;	/* the map whose symbols are being listed */
	u16 namelen;		/* widest symbol name seen, for column sizing */
	u8 addrlen;		/* hex digits needed for the highest address */
};
53
54static void map_browser__write(struct ui_browser *self, void *nd, int row)
55{
56 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
57 struct map_browser *mb = container_of(self, struct map_browser, b);
58 bool current_entry = ui_browser__is_current_entry(self, row);
59 int color = ui_browser__percent_color(0, current_entry);
60
61 SLsmg_set_color(color);
62 slsmg_printf("%*llx %*llx %c ",
63 mb->addrlen, sym->start, mb->addrlen, sym->end,
64 sym->binding == STB_GLOBAL ? 'g' :
65 sym->binding == STB_LOCAL ? 'l' : 'w');
66 slsmg_write_nstring(sym->name, mb->namelen);
67}
68
/* FIXME uber-kludgy, see comment on cmd_report... */
/*
 * Return a pointer to the per-symbol u32 browser index, assumed to live
 * in memory immediately before the symbol (behind an rb_node plus a u32).
 * NOTE(review): this depends entirely on how struct symbol is allocated
 * elsewhere (extra bytes reserved ahead of it) -- confirm against the
 * allocation site before changing either layout.
 */
static u32 *symbol__browser_index(struct symbol *self)
{
	return ((void *)self) - sizeof(struct rb_node) - sizeof(u32);
}
74
75static int map_browser__search(struct map_browser *self)
76{
77 char target[512];
78 struct symbol *sym;
79 int err = ui_entry__read("Search by name/addr", target, sizeof(target), 40);
80
81 if (err)
82 return err;
83
84 if (target[0] == '0' && tolower(target[1]) == 'x') {
85 u64 addr = strtoull(target, NULL, 16);
86 sym = map__find_symbol(self->map, addr, NULL);
87 } else
88 sym = map__find_symbol_by_name(self->map, target, NULL);
89
90 if (sym != NULL) {
91 u32 *idx = symbol__browser_index(sym);
92
93 self->b.top = &sym->rb_node;
94 self->b.index = self->b.top_idx = *idx;
95 } else
96 ui_helpline__fpush("%s not found!", target);
97
98 return 0;
99}
100
101static int map_browser__run(struct map_browser *self, struct newtExitStruct *es)
102{
103 if (ui_browser__show(&self->b, self->map->dso->long_name,
104 "Press <- or ESC to exit, %s / to search",
105 verbose ? "" : "restart with -v to use") < 0)
106 return -1;
107
108 newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
109 newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
110 if (verbose)
111 newtFormAddHotKey(self->b.form, '/');
112
113 while (1) {
114 ui_browser__run(&self->b, es);
115
116 if (es->reason != NEWT_EXIT_HOTKEY)
117 break;
118 if (verbose && es->u.key == '/')
119 map_browser__search(self);
120 else
121 break;
122 }
123
124 ui_browser__hide(&self->b);
125 return 0;
126}
127
128int map__browse(struct map *self)
129{
130 struct map_browser mb = {
131 .b = {
132 .entries = &self->dso->symbols[self->type],
133 .refresh = ui_browser__rb_tree_refresh,
134 .seek = ui_browser__rb_tree_seek,
135 .write = map_browser__write,
136 },
137 .map = self,
138 };
139 struct newtExitStruct es;
140 struct rb_node *nd;
141 char tmp[BITS_PER_LONG / 4];
142 u64 maxaddr = 0;
143
144 for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
145 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
146
147 if (mb.namelen < pos->namelen)
148 mb.namelen = pos->namelen;
149 if (maxaddr < pos->end)
150 maxaddr = pos->end;
151 if (verbose) {
152 u32 *idx = symbol__browser_index(pos);
153 *idx = mb.b.nr_entries;
154 }
155 ++mb.b.nr_entries;
156 }
157
158 mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
159 mb.b.width += mb.addrlen * 2 + 4 + mb.namelen;
160 return map_browser__run(&mb, &es);
161}
diff --git a/tools/perf/util/ui/browsers/map.h b/tools/perf/util/ui/browsers/map.h
new file mode 100644
index 000000000000..df8581a43e17
--- /dev/null
+++ b/tools/perf/util/ui/browsers/map.h
@@ -0,0 +1,6 @@
1#ifndef _PERF_UI_MAP_BROWSER_H_
2#define _PERF_UI_MAP_BROWSER_H_ 1
3struct map;
4
5int map__browse(struct map *self);
6#endif /* _PERF_UI_MAP_BROWSER_H_ */
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
new file mode 100644
index 000000000000..8d79daa4458a
--- /dev/null
+++ b/tools/perf/util/ui/helpline.c
@@ -0,0 +1,69 @@
1#define _GNU_SOURCE
2#include <stdio.h>
3#include <stdlib.h>
4#include <newt.h>
5
6#include "../debug.h"
7#include "helpline.h"
8
/* Remove the most recently pushed help line, revealing the previous one */
void ui_helpline__pop(void)
{
	newtPopHelpLine();
}
13
/* Push @msg onto newt's help line stack, making it the visible help text */
void ui_helpline__push(const char *msg)
{
	newtPushHelpLine(msg);
}
18
/*
 * Format @fmt into a freshly allocated string and push it onto the
 * help line stack; on allocation failure fall back to stderr.
 *
 * Fix: the original reused @ap for the vfprintf() fallback after
 * vasprintf() had already consumed it -- undefined behavior. A va_copy
 * taken up front keeps the fallback safe.
 */
void ui_helpline__vpush(const char *fmt, va_list ap)
{
	char *s;
	va_list ap_copy;

	va_copy(ap_copy, ap);

	if (vasprintf(&s, fmt, ap) < 0)
		vfprintf(stderr, fmt, ap_copy);
	else {
		ui_helpline__push(s);
		free(s);
	}

	va_end(ap_copy);
}
30
/* printf-like front end to ui_helpline__vpush() */
void ui_helpline__fpush(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	ui_helpline__vpush(fmt, ap);
	va_end(ap);
}
39
/* Replace the current help line: pop the old one, push @msg */
void ui_helpline__puts(const char *msg)
{
	ui_helpline__pop();
	ui_helpline__push(msg);
}
45
/* Show a blank help line at startup (puts pops first, then pushes) */
void ui_helpline__init(void)
{
	ui_helpline__puts(" ");
}
50
/* Last complete help message; also shown by exit_browser() on fatal errors */
char ui_helpline__last_msg[1024];

/*
 * vprintf-style sink for help line output: partial messages accumulate
 * in ui_helpline__last_msg until a trailing '\n' arrives, at which
 * point the whole buffered message is displayed and the buffer reset.
 *
 * Fixes: vsnprintf returns the would-be length, so on truncation the
 * old code let backlog run past the buffer (out-of-bounds writes on
 * the next call); and with an empty message backlog - 1 indexed at -1.
 */
int ui_helpline__show_help(const char *format, va_list ap)
{
	int ret;
	static int backlog;

	ret = vsnprintf(ui_helpline__last_msg + backlog,
			sizeof(ui_helpline__last_msg) - backlog, format, ap);
	if (ret < 0)
		return ret;

	backlog += ret;
	/* clamp: vsnprintf may report more than was actually stored */
	if (backlog >= (int)sizeof(ui_helpline__last_msg))
		backlog = sizeof(ui_helpline__last_msg) - 1;

	if (backlog > 0 && ui_helpline__last_msg[backlog - 1] == '\n') {
		ui_helpline__puts(ui_helpline__last_msg);
		newtRefresh();
		backlog = 0;
	}

	return ret;
}
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/util/ui/helpline.h
new file mode 100644
index 000000000000..ab6028d0c401
--- /dev/null
+++ b/tools/perf/util/ui/helpline.h
@@ -0,0 +1,11 @@
1#ifndef _PERF_UI_HELPLINE_H_
2#define _PERF_UI_HELPLINE_H_ 1
3
4void ui_helpline__init(void);
5void ui_helpline__pop(void);
6void ui_helpline__push(const char *msg);
7void ui_helpline__vpush(const char *fmt, va_list ap);
8void ui_helpline__fpush(const char *fmt, ...);
9void ui_helpline__puts(const char *msg);
10
11#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h
new file mode 100644
index 000000000000..5623da8e8080
--- /dev/null
+++ b/tools/perf/util/ui/libslang.h
@@ -0,0 +1,27 @@
#ifndef _PERF_UI_SLANG_H_
#define _PERF_UI_SLANG_H_ 1
/*
 * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
 * the build if it isn't defined. Use the equivalent one that glibc
 * has on features.h.
 */
#include <features.h>
#ifndef HAVE_LONG_LONG
#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
#endif
#include <slang.h>

/*
 * Pre-2.1.4 slang takes plain "char *" arguments in these calls; the
 * wrappers cast away const so literals and const buffers can be passed
 * uniformly regardless of the installed slang version.
 */
#if SLANG_VERSION < 20104
#define slsmg_printf(msg, args...) \
	SLsmg_printf((char *)msg, ##args)
#define slsmg_write_nstring(msg, len) \
	SLsmg_write_nstring((char *)msg, len)
#define sltt_set_color(obj, name, fg, bg) \
	SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
#else
#define slsmg_printf SLsmg_printf
#define slsmg_write_nstring SLsmg_write_nstring
#define sltt_set_color SLtt_set_color
#endif

#endif /* _PERF_UI_SLANG_H_ */
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
new file mode 100644
index 000000000000..d7fc399d36b3
--- /dev/null
+++ b/tools/perf/util/ui/progress.c
@@ -0,0 +1,60 @@
1#include <stdlib.h>
2#include <newt.h>
3#include "../cache.h"
4#include "progress.h"
5
/* TUI progress bar: a centered newt window holding a single scale widget */
struct ui_progress {
	newtComponent form, scale;
};
9
10struct ui_progress *ui_progress__new(const char *title, u64 total)
11{
12 struct ui_progress *self = malloc(sizeof(*self));
13
14 if (self != NULL) {
15 int cols;
16
17 if (use_browser <= 0)
18 return self;
19 newtGetScreenSize(&cols, NULL);
20 cols -= 4;
21 newtCenteredWindow(cols, 1, title);
22 self->form = newtForm(NULL, NULL, 0);
23 if (self->form == NULL)
24 goto out_free_self;
25 self->scale = newtScale(0, 0, cols, total);
26 if (self->scale == NULL)
27 goto out_free_form;
28 newtFormAddComponent(self->form, self->scale);
29 newtRefresh();
30 }
31
32 return self;
33
34out_free_form:
35 newtFormDestroy(self->form);
36out_free_self:
37 free(self);
38 return NULL;
39}
40
/* Set the bar to absolute position @curr and redraw; no-op outside the TUI */
void ui_progress__update(struct ui_progress *self, u64 curr)
{
	/*
	 * FIXME: We should have a per UI backend way of showing progress,
	 * stdio will just show a percentage as NN%, etc.
	 */
	if (use_browser <= 0)
		return;
	newtScaleSet(self->scale, curr);
	newtRefresh();
}
52
/* Destroy the widgets (TUI mode only, mirroring __new) and free @self */
void ui_progress__delete(struct ui_progress *self)
{
	if (use_browser > 0) {
		newtFormDestroy(self->form);
		newtPopWindow();
	}
	free(self);
}
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/util/ui/progress.h
new file mode 100644
index 000000000000..a3820a0beb5b
--- /dev/null
+++ b/tools/perf/util/ui/progress.h
@@ -0,0 +1,11 @@
1#ifndef _PERF_UI_PROGRESS_H_
2#define _PERF_UI_PROGRESS_H_ 1
3
4struct ui_progress;
5
6struct ui_progress *ui_progress__new(const char *title, u64 total);
7void ui_progress__delete(struct ui_progress *self);
8
9void ui_progress__update(struct ui_progress *self, u64 curr);
10
11#endif
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
new file mode 100644
index 000000000000..662085032eb7
--- /dev/null
+++ b/tools/perf/util/ui/setup.c
@@ -0,0 +1,42 @@
1#include <newt.h>
2#include <signal.h>
3#include <stdbool.h>
4
5#include "../cache.h"
6#include "../debug.h"
7#include "browser.h"
8#include "helpline.h"
9
/* Ctrl-Z handler: restore the terminal, actually stop, reinit newt on resume */
static void newt_suspend(void *d __used)
{
	newtSuspend();
	raise(SIGTSTP);
	newtResume();
}
16
/*
 * Choose between the newt TUI and plain stdio output.
 *
 * Falls back to the pager (use_browser = 0) when stdout is not a tty,
 * the browser was explicitly disabled, or raw trace dumping is active.
 * NOTE(review): use_browser appears to be tri-state (negative = auto,
 * which passes the !use_browser test) -- confirm against its definition.
 */
void setup_browser(void)
{
	if (!isatty(1) || !use_browser || dump_trace) {
		use_browser = 0;
		setup_pager();
		return;
	}

	use_browser = 1;
	newtInit();
	newtCls();
	newtSetSuspendCallback(newt_suspend, NULL);
	ui_helpline__init();
	ui_browser__init();
}
32
/*
 * Tear down newt. With @wait_for_ok, first show the last help line
 * message in a modal "Fatal Error" window so the user can read it
 * before the screen is restored.
 */
void exit_browser(bool wait_for_ok)
{
	if (use_browser > 0) {
		if (wait_for_ok) {
			/* newtWinMessage wants non-const strings */
			char title[] = "Fatal Error", ok[] = "Ok";
			newtWinMessage(title, ok, ui_helpline__last_msg);
		}
		newtFinished();
	}
}
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
new file mode 100644
index 000000000000..04600e26ceea
--- /dev/null
+++ b/tools/perf/util/ui/util.c
@@ -0,0 +1,114 @@
1#include <newt.h>
2#include <signal.h>
3#include <stdio.h>
4#include <stdbool.h>
5#include <string.h>
6#include <sys/ttydefaults.h>
7
8#include "../cache.h"
9#include "../debug.h"
10#include "browser.h"
11#include "helpline.h"
12#include "util.h"
13
14newtComponent newt_form__new(void);
15
/* Register the hot keys that abort any of our forms */
static void newt_form__set_exit_keys(newtComponent self)
{
	newtFormAddHotKey(self, NEWT_KEY_LEFT);
	newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
	newtFormAddHotKey(self, 'Q');
	newtFormAddHotKey(self, 'q');
	newtFormAddHotKey(self, CTRL('c'));
}
24
/* Create a form with the standard exit hot keys installed; NULL on failure */
newtComponent newt_form__new(void)
{
	newtComponent self = newtForm(NULL, NULL, 0);
	if (self)
		newt_form__set_exit_keys(self);
	return self;
}
32
33int ui__popup_menu(int argc, char * const argv[])
34{
35 struct newtExitStruct es;
36 int i, rc = -1, max_len = 5;
37 newtComponent listbox, form = newt_form__new();
38
39 if (form == NULL)
40 return -1;
41
42 listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
43 if (listbox == NULL)
44 goto out_destroy_form;
45
46 newtFormAddComponent(form, listbox);
47
48 for (i = 0; i < argc; ++i) {
49 int len = strlen(argv[i]);
50 if (len > max_len)
51 max_len = len;
52 if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
53 goto out_destroy_form;
54 }
55
56 newtCenteredWindow(max_len, argc, NULL);
57 newtFormRun(form, &es);
58 rc = newtListboxGetCurrent(listbox) - NULL;
59 if (es.reason == NEWT_EXIT_HOTKEY)
60 rc = -1;
61 newtPopWindow();
62out_destroy_form:
63 newtFormDestroy(form);
64 return rc;
65}
66
67int ui__help_window(const char *text)
68{
69 struct newtExitStruct es;
70 newtComponent tb, form = newt_form__new();
71 int rc = -1;
72 int max_len = 0, nr_lines = 0;
73 const char *t;
74
75 if (form == NULL)
76 return -1;
77
78 t = text;
79 while (1) {
80 const char *sep = strchr(t, '\n');
81 int len;
82
83 if (sep == NULL)
84 sep = strchr(t, '\0');
85 len = sep - t;
86 if (max_len < len)
87 max_len = len;
88 ++nr_lines;
89 if (*sep == '\0')
90 break;
91 t = sep + 1;
92 }
93
94 tb = newtTextbox(0, 0, max_len, nr_lines, 0);
95 if (tb == NULL)
96 goto out_destroy_form;
97
98 newtTextboxSetText(tb, text);
99 newtFormAddComponent(form, tb);
100 newtCenteredWindow(max_len, nr_lines, NULL);
101 newtFormRun(form, &es);
102 newtPopWindow();
103 rc = 0;
104out_destroy_form:
105 newtFormDestroy(form);
106 return rc;
107}
108
/* Modal yes/no question; returns true when the user picks "Yes" */
bool ui__dialog_yesno(const char *msg)
{
	/* newtWinChoice should really be accepting const char pointers... */
	char yes[] = "Yes", no[] = "No";
	return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
}
diff --git a/tools/perf/util/ui/util.h b/tools/perf/util/ui/util.h
new file mode 100644
index 000000000000..afcbc1d99531
--- /dev/null
+++ b/tools/perf/util/ui/util.h
@@ -0,0 +1,10 @@
1#ifndef _PERF_UI_UTIL_H_
2#define _PERF_UI_UTIL_H_ 1
3
4#include <stdbool.h>
5
6int ui__popup_menu(int argc, char * const argv[]);
7int ui__help_window(const char *text);
8bool ui__dialog_yesno(const char *msg);
9
10#endif /* _PERF_UI_UTIL_H_ */