author     Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 02:05:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2017-05-02 02:05:12 -0400
commit     d19458a4ead14da70c4c852659ccb0234ecd769e
tree       865659278bc94f5dbcbc8a9cfb9cd0bbe08eb81c
parent     888411be09739443271d254c9d8c1f5188c05509
parent     d4d969909bef4c1e103eec0fc2c820773811fb72
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar:
"Most of the commits are continued SGI UV4 hardware-enablement changes,
plus there's also new Bluetooth support for the Intel Edison platform"
* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/platform/intel-mid: Enable Bluetooth support on Intel Edison
x86/platform/uv/BAU: Implement uv4_wait_completion with read_status
x86/platform/uv/BAU: Add wait_completion to bau_operations
x86/platform/uv/BAU: Add status mmr location fields to bau_control
x86/platform/uv/BAU: Cleanup bau_operations declaration and instances
x86/platform/uv/BAU: Add payload descriptor qualifier
x86/platform/uv/BAU: Add uv_bau_version enumerated constants
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h                       |  82
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile       |   3
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bt.c  | 108
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c                          | 195
4 files changed, 299 insertions, 89 deletions
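
Before the diff itself, a brief orientation: the UV changes below hinge on an ops-table dispatch, where a per-hub-version bau_operations structure (now carrying a wait_completion hook) is selected once at init and called through a single ops instance. The following self-contained user-space sketch models that pattern; the names mirror identifiers from the patch, but the types, values, and function bodies are simplified stand-ins for illustration, not the kernel's own definitions.

/*
 * Simplified, user-space model of the per-version ops-table dispatch.
 * The identifiers echo the patch below, but everything here is an
 * illustrative stand-in, not kernel code.
 */
#include <stdio.h>

enum uv_bau_version { UV_BAU_V1 = 1, UV_BAU_V2, UV_BAU_V3, UV_BAU_V4 };

struct bau_control {
        enum uv_bau_version uvhub_version;
        unsigned long status_mmr;       /* which status register to poll */
        int status_index;               /* offset of this CPU's 2-bit field */
};

struct bau_operations {
        /* one completion-wait routine per hardware generation */
        int (*wait_completion)(struct bau_control *bcp);
};

static int model_uv1_wait_completion(struct bau_control *bcp)
{
        printf("UV1-style poll: mmr=%#lx index=%d\n",
               bcp->status_mmr, bcp->status_index);
        return 0;
}

static int model_uv4_wait_completion(struct bau_control *bcp)
{
        printf("UV4-style poll: mmr=%#lx index=%d\n",
               bcp->status_mmr, bcp->status_index);
        return 0;
}

static const struct bau_operations uv1_ops = {
        .wait_completion = model_uv1_wait_completion,
};
static const struct bau_operations uv4_ops = {
        .wait_completion = model_uv4_wait_completion,
};

/* chosen once at init, then used for every broadcast */
static struct bau_operations ops;

int main(void)
{
        struct bau_control bcp = {
                .uvhub_version = UV_BAU_V4,
                .status_mmr = 0x320,
                .status_index = 6,
        };

        ops = (bcp.uvhub_version == UV_BAU_V4) ? uv4_ops : uv1_ops;
        return ops.wait_completion(&bcp);
}

In the patch proper, the same selection happens in uv_bau_init() and the hook is invoked from uv_flush_send_and_wait() via ops.wait_completion().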
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 57ab86d94d64..7cac79802ad2 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -185,6 +185,15 @@
 #define MSG_REGULAR 1
 #define MSG_RETRY 2
 
+#define BAU_DESC_QUALIFIER 0x534749
+
+enum uv_bau_version {
+        UV_BAU_V1 = 1,
+        UV_BAU_V2,
+        UV_BAU_V3,
+        UV_BAU_V4,
+};
+
 /*
  * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
  * If the 'multilevel' flag in the header portion of the descriptor
@@ -222,20 +231,32 @@ struct bau_local_cpumask {
  *   the s/w ack bit vector ]
  */
 
-/*
- * The payload is software-defined for INTD transactions
+/**
+ * struct uv1_2_3_bau_msg_payload - defines payload for INTD transactions
+ * @address: Signifies a page or all TLB's of the cpu
+ * @sending_cpu: CPU from which the message originates
+ * @acknowledge_count: CPUs on the destination Hub that received the interrupt
  */
-struct bau_msg_payload {
-        unsigned long address;            /* signifies a page or all
-                                             TLB's of the cpu */
-        /* 64 bits */
-        unsigned short sending_cpu;       /* filled in by sender */
-        /* 16 bits */
-        unsigned short acknowledge_count; /* filled in by destination */
-        /* 16 bits */
-        unsigned int reserved1:32;        /* not usable */
+struct uv1_2_3_bau_msg_payload {
+        u64 address;
+        u16 sending_cpu;
+        u16 acknowledge_count;
 };
 
+/**
+ * struct uv4_bau_msg_payload - defines payload for INTD transactions
+ * @address: Signifies a page or all TLB's of the cpu
+ * @sending_cpu: CPU from which the message originates
+ * @acknowledge_count: CPUs on the destination Hub that received the interrupt
+ * @qualifier: Set by source to verify origin of INTD broadcast
+ */
+struct uv4_bau_msg_payload {
+        u64 address;
+        u16 sending_cpu;
+        u16 acknowledge_count;
+        u32 reserved:8;
+        u32 qualifier:24;
+};
 
 /*
  * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
@@ -385,17 +406,6 @@ struct uv2_3_bau_msg_header {
         /* bits 127:120 */
 };
 
-/* Abstracted BAU functions */
-struct bau_operations {
-        unsigned long (*read_l_sw_ack)(void);
-        unsigned long (*read_g_sw_ack)(int pnode);
-        unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
-        void (*write_l_sw_ack)(unsigned long mmr);
-        void (*write_g_sw_ack)(int pnode, unsigned long mmr);
-        void (*write_payload_first)(int pnode, unsigned long mmr);
-        void (*write_payload_last)(int pnode, unsigned long mmr);
-};
-
 /*
  * The activation descriptor:
  * The format of the message to send, plus all accompanying control
@@ -411,7 +421,10 @@ struct bau_desc {
                 struct uv2_3_bau_msg_header uv2_3_hdr;
         } header;
 
-        struct bau_msg_payload payload;
+        union bau_payload_header {
+                struct uv1_2_3_bau_msg_payload uv1_2_3;
+                struct uv4_bau_msg_payload uv4;
+        } payload;
 };
 /* UV1:
  *   -payload--    ---------header------
@@ -588,8 +601,12 @@ struct uvhub_desc {
         struct socket_desc socket[2];
 };
 
-/*
- * one per-cpu; to locate the software tables
+/**
+ * struct bau_control
+ * @status_mmr: location of status mmr, determined by uvhub_cpu
+ * @status_index: index of ERR|BUSY bits in status mmr, determined by uvhub_cpu
+ *
+ * Per-cpu control struct containing CPU topology information and BAU tuneables.
  */
 struct bau_control {
         struct bau_desc *descriptor_base;
@@ -607,6 +624,8 @@ struct bau_control {
         int timeout_tries;
         int ipi_attempts;
         int conseccompletes;
+        u64 status_mmr;
+        int status_index;
         bool nobau;
         short baudisabled;
         short cpu;
@@ -644,6 +663,19 @@ struct bau_control {
         struct hub_and_pnode *thp;
 };
 
+/* Abstracted BAU functions */
+struct bau_operations {
+        unsigned long (*read_l_sw_ack)(void);
+        unsigned long (*read_g_sw_ack)(int pnode);
+        unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
+        void (*write_l_sw_ack)(unsigned long mmr);
+        void (*write_g_sw_ack)(int pnode, unsigned long mmr);
+        void (*write_payload_first)(int pnode, unsigned long mmr);
+        void (*write_payload_last)(int pnode, unsigned long mmr);
+        int (*wait_completion)(struct bau_desc*,
+                        struct bau_control*, long try);
+};
+
 static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
 {
         write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 3dbde04febdc..53e0235e308f 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -2,8 +2,9 @@
 obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o
 # SDHCI Devices
 obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o
-# WiFi
+# WiFi + BT
 obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o
+obj-$(subst m,y,$(CONFIG_BT_HCIUART_BCM)) += platform_bt.o
 # IPC Devices
 obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
 obj-$(subst m,y,$(CONFIG_SND_MFLD_MACHINE)) += platform_msic_audio.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
new file mode 100644
index 000000000000..5a0483e7bf66
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -0,0 +1,108 @@
+/*
+ * Bluetooth platform data initialization file
+ *
+ * (C) Copyright 2017 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+
+struct bt_sfi_data {
+        struct device *dev;
+        const char *name;
+        int (*setup)(struct bt_sfi_data *ddata);
+};
+
+static struct gpiod_lookup_table tng_bt_sfi_gpio_table = {
+        .dev_id = "hci_bcm",
+        .table  = {
+                GPIO_LOOKUP("0000:00:0c.0", -1, "device-wakeup", GPIO_ACTIVE_HIGH),
+                GPIO_LOOKUP("0000:00:0c.0", -1, "shutdown", GPIO_ACTIVE_HIGH),
+                GPIO_LOOKUP("0000:00:0c.0", -1, "host-wakeup", GPIO_ACTIVE_HIGH),
+                { },
+        },
+};
+
+#define TNG_BT_SFI_GPIO_DEVICE_WAKEUP   "bt_wakeup"
+#define TNG_BT_SFI_GPIO_SHUTDOWN        "BT-reset"
+#define TNG_BT_SFI_GPIO_HOST_WAKEUP     "bt_uart_enable"
+
+static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
+{
+        struct gpiod_lookup_table *table = &tng_bt_sfi_gpio_table;
+        struct gpiod_lookup *lookup = table->table;
+        struct pci_dev *pdev;
+
+        /* Connected to /dev/ttyS0 */
+        pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(4, 1));
+        if (!pdev)
+                return -ENODEV;
+
+        ddata->dev = &pdev->dev;
+        ddata->name = table->dev_id;
+
+        lookup[0].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_DEVICE_WAKEUP);
+        lookup[1].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_SHUTDOWN);
+        lookup[2].chip_hwnum = get_gpio_by_name(TNG_BT_SFI_GPIO_HOST_WAKEUP);
+
+        gpiod_add_lookup_table(table);
+        return 0;
+}
+
+static struct bt_sfi_data tng_bt_sfi_data __initdata = {
+        .setup  = tng_bt_sfi_setup,
+};
+
+#define ICPU(model, ddata)      \
+        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
+
+static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
+        ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data),
+        {}
+};
+
+static int __init bt_sfi_init(void)
+{
+        struct platform_device_info info;
+        struct platform_device *pdev;
+        const struct x86_cpu_id *id;
+        struct bt_sfi_data *ddata;
+        int ret;
+
+        id = x86_match_cpu(bt_sfi_cpu_ids);
+        if (!id)
+                return -ENODEV;
+
+        ddata = (struct bt_sfi_data *)id->driver_data;
+        if (!ddata)
+                return -ENODEV;
+
+        ret = ddata->setup(ddata);
+        if (ret)
+                return ret;
+
+        memset(&info, 0, sizeof(info));
+        info.fwnode = ddata->dev->fwnode;
+        info.parent = ddata->dev;
+        info.name = ddata->name,
+        info.id = PLATFORM_DEVID_NONE,
+
+        pdev = platform_device_register_full(&info);
+        if (IS_ERR(pdev))
+                return PTR_ERR(pdev);
+
+        dev_info(ddata->dev, "Registered Bluetooth device: %s\n", ddata->name);
+        return 0;
+}
+device_initcall(bt_sfi_init);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index f25982cdff90..42e65fee5673 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -23,28 +23,7 @@
 #include <asm/irq_vectors.h>
 #include <asm/timer.h>
 
-static struct bau_operations ops;
-
-static struct bau_operations uv123_bau_ops = {
-        .bau_gpa_to_offset = uv_gpa_to_offset,
-        .read_l_sw_ack = read_mmr_sw_ack,
-        .read_g_sw_ack = read_gmmr_sw_ack,
-        .write_l_sw_ack = write_mmr_sw_ack,
-        .write_g_sw_ack = write_gmmr_sw_ack,
-        .write_payload_first = write_mmr_payload_first,
-        .write_payload_last = write_mmr_payload_last,
-};
-
-static struct bau_operations uv4_bau_ops = {
-        .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
-        .read_l_sw_ack = read_mmr_proc_sw_ack,
-        .read_g_sw_ack = read_gmmr_proc_sw_ack,
-        .write_l_sw_ack = write_mmr_proc_sw_ack,
-        .write_g_sw_ack = write_gmmr_proc_sw_ack,
-        .write_payload_first = write_mmr_proc_payload_first,
-        .write_payload_last = write_mmr_proc_payload_last,
-};
-
+static struct bau_operations ops __ro_after_init;
 
 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
 static int timeout_base_ns[] = {
@@ -548,11 +527,12 @@ static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
  * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
  */
 static int uv1_wait_completion(struct bau_desc *bau_desc,
-                        unsigned long mmr_offset, int right_shift,
                         struct bau_control *bcp, long try)
 {
         unsigned long descriptor_status;
         cycles_t ttm;
+        u64 mmr_offset = bcp->status_mmr;
+        int right_shift = bcp->status_index;
         struct ptc_stats *stat = bcp->statp;
 
         descriptor_status = uv1_read_status(mmr_offset, right_shift);
@@ -640,11 +620,12 @@ int handle_uv2_busy(struct bau_control *bcp)
 }
 
 static int uv2_3_wait_completion(struct bau_desc *bau_desc,
-                        unsigned long mmr_offset, int right_shift,
                         struct bau_control *bcp, long try)
 {
         unsigned long descriptor_stat;
         cycles_t ttm;
+        u64 mmr_offset = bcp->status_mmr;
+        int right_shift = bcp->status_index;
         int desc = bcp->uvhub_cpu;
         long busy_reps = 0;
         struct ptc_stats *stat = bcp->statp;
@@ -706,28 +687,59 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc,
 }
 
 /*
- * There are 2 status registers; each and array[32] of 2 bits. Set up for
- * which register to read and position in that register based on cpu in
- * current hub.
+ * Returns the status of current BAU message for cpu desc as a bit field
+ * [Error][Busy][Aux]
  */
-static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
+static u64 read_status(u64 status_mmr, int index, int desc)
 {
-        int right_shift;
-        unsigned long mmr_offset;
+        u64 stat;
+
+        stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
+        stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;
+
+        return stat;
+}
+
+static int uv4_wait_completion(struct bau_desc *bau_desc,
+                        struct bau_control *bcp, long try)
+{
+        struct ptc_stats *stat = bcp->statp;
+        u64 descriptor_stat;
+        u64 mmr = bcp->status_mmr;
+        int index = bcp->status_index;
         int desc = bcp->uvhub_cpu;
 
-        if (desc < UV_CPUS_PER_AS) {
-                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
-                right_shift = desc * UV_ACT_STATUS_SIZE;
-        } else {
-                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-                right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
-        }
+        descriptor_stat = read_status(mmr, index, desc);
 
-        if (bcp->uvhub_version == 1)
-                return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
-        else
-                return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
+        /* spin on the status MMR, waiting for it to go idle */
+        while (descriptor_stat != UV2H_DESC_IDLE) {
+                switch (descriptor_stat) {
+                case UV2H_DESC_SOURCE_TIMEOUT:
+                        stat->s_stimeout++;
+                        return FLUSH_GIVEUP;
+
+                case UV2H_DESC_DEST_TIMEOUT:
+                        stat->s_dtimeout++;
+                        bcp->conseccompletes = 0;
+                        return FLUSH_RETRY_TIMEOUT;
+
+                case UV2H_DESC_DEST_STRONG_NACK:
+                        stat->s_plugged++;
+                        bcp->conseccompletes = 0;
+                        return FLUSH_RETRY_PLUGGED;
+
+                case UV2H_DESC_DEST_PUT_ERR:
+                        bcp->conseccompletes = 0;
+                        return FLUSH_GIVEUP;
+
+                default:
+                        /* descriptor_stat is still BUSY */
+                        cpu_relax();
+                }
+                descriptor_stat = read_status(mmr, index, desc);
+        }
+        bcp->conseccompletes++;
+        return FLUSH_COMPLETE;
 }
 
 /*
@@ -918,7 +930,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
         struct uv1_bau_msg_header *uv1_hdr = NULL;
         struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
 
-        if (bcp->uvhub_version == 1) {
+        if (bcp->uvhub_version == UV_BAU_V1) {
                 uv1 = 1;
                 uv1_throttle(hmaster, stat);
         }
@@ -958,7 +970,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
                 write_mmr_activation(index);
 
                 try++;
-                completion_stat = wait_completion(bau_desc, bcp, try);
+                completion_stat = ops.wait_completion(bau_desc, bcp, try);
 
                 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
@@ -1114,15 +1126,12 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                           unsigned long end,
                                           unsigned int cpu)
 {
-        int locals = 0;
-        int remotes = 0;
-        int hubs = 0;
+        int locals = 0, remotes = 0, hubs = 0;
         struct bau_desc *bau_desc;
         struct cpumask *flush_mask;
         struct ptc_stats *stat;
         struct bau_control *bcp;
-        unsigned long descriptor_status;
-        unsigned long status;
+        unsigned long descriptor_status, status, address;
 
         bcp = &per_cpu(bau_control, cpu);
 
@@ -1171,10 +1180,24 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
         if (!end || (end - start) <= PAGE_SIZE)
-                bau_desc->payload.address = start;
+                address = start;
         else
-                bau_desc->payload.address = TLB_FLUSH_ALL;
-        bau_desc->payload.sending_cpu = cpu;
+                address = TLB_FLUSH_ALL;
+
+        switch (bcp->uvhub_version) {
+        case UV_BAU_V1:
+        case UV_BAU_V2:
+        case UV_BAU_V3:
+                bau_desc->payload.uv1_2_3.address = address;
+                bau_desc->payload.uv1_2_3.sending_cpu = cpu;
+                break;
+        case UV_BAU_V4:
+                bau_desc->payload.uv4.address = address;
+                bau_desc->payload.uv4.sending_cpu = cpu;
+                bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
+                break;
+        }
+
         /*
          * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
          * or 1 if it gave up and the original cpumask should be returned.
@@ -1296,7 +1319,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
                 msgdesc.msg_slot = msg - msgdesc.queue_first;
                 msgdesc.msg = msg;
-                if (bcp->uvhub_version == 2)
+                if (bcp->uvhub_version == UV_BAU_V2)
                         process_uv2_message(&msgdesc, bcp);
                 else
                         /* no error workaround for uv1 or uv3 */
@@ -1838,7 +1861,7 @@ static void pq_init(int node, int pnode)
          * and the payload queue tail must be maintained by the kernel.
          */
         bcp = &per_cpu(bau_control, smp_processor_id());
-        if (bcp->uvhub_version <= 3) {
+        if (bcp->uvhub_version <= UV_BAU_V3) {
                 tail = first;
                 gnode = uv_gpa_to_gnode(uv_gpa(pqp));
                 first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
@@ -2034,8 +2057,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
                         struct bau_control **smasterp,
                         struct bau_control **hmasterp)
 {
-        int i;
-        int cpu;
+        int i, cpu, uvhub_cpu;
         struct bau_control *bcp;
 
         for (i = 0; i < sdp->num_cpus; i++) {
@@ -2052,19 +2074,33 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
                 bcp->socket_master = *smasterp;
                 bcp->uvhub = bdp->uvhub;
                 if (is_uv1_hub())
-                        bcp->uvhub_version = 1;
+                        bcp->uvhub_version = UV_BAU_V1;
                 else if (is_uv2_hub())
-                        bcp->uvhub_version = 2;
+                        bcp->uvhub_version = UV_BAU_V2;
                 else if (is_uv3_hub())
-                        bcp->uvhub_version = 3;
+                        bcp->uvhub_version = UV_BAU_V3;
                 else if (is_uv4_hub())
-                        bcp->uvhub_version = 4;
+                        bcp->uvhub_version = UV_BAU_V4;
                 else {
                         pr_emerg("uvhub version not 1, 2, 3, or 4\n");
                         return 1;
                 }
                 bcp->uvhub_master = *hmasterp;
-                bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);
+                uvhub_cpu = uv_cpu_blade_processor_id(cpu);
+                bcp->uvhub_cpu = uvhub_cpu;
+
+                /*
+                 * The ERROR and BUSY status registers are located pairwise over
+                 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
+                 */
+                if (uvhub_cpu < UV_CPUS_PER_AS) {
+                        bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+                        bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
+                } else {
+                        bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+                        bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
+                                        * UV_ACT_STATUS_SIZE;
+                }
 
                 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
                         pr_emerg("%d cpus per uvhub invalid\n",
@@ -2147,6 +2183,39 @@ fail:
         return 1;
 }
 
+static const struct bau_operations uv1_bau_ops __initconst = {
+        .bau_gpa_to_offset = uv_gpa_to_offset,
+        .read_l_sw_ack = read_mmr_sw_ack,
+        .read_g_sw_ack = read_gmmr_sw_ack,
+        .write_l_sw_ack = write_mmr_sw_ack,
+        .write_g_sw_ack = write_gmmr_sw_ack,
+        .write_payload_first = write_mmr_payload_first,
+        .write_payload_last = write_mmr_payload_last,
+        .wait_completion = uv1_wait_completion,
+};
+
+static const struct bau_operations uv2_3_bau_ops __initconst = {
+        .bau_gpa_to_offset = uv_gpa_to_offset,
+        .read_l_sw_ack = read_mmr_sw_ack,
+        .read_g_sw_ack = read_gmmr_sw_ack,
+        .write_l_sw_ack = write_mmr_sw_ack,
+        .write_g_sw_ack = write_gmmr_sw_ack,
+        .write_payload_first = write_mmr_payload_first,
+        .write_payload_last = write_mmr_payload_last,
+        .wait_completion = uv2_3_wait_completion,
+};
+
+static const struct bau_operations uv4_bau_ops __initconst = {
+        .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
+        .read_l_sw_ack = read_mmr_proc_sw_ack,
+        .read_g_sw_ack = read_gmmr_proc_sw_ack,
+        .write_l_sw_ack = write_mmr_proc_sw_ack,
+        .write_g_sw_ack = write_gmmr_proc_sw_ack,
+        .write_payload_first = write_mmr_proc_payload_first,
+        .write_payload_last = write_mmr_proc_payload_last,
+        .wait_completion = uv4_wait_completion,
+};
+
 /*
  * Initialization of BAU-related structures
  */
@@ -2166,11 +2235,11 @@ static int __init uv_bau_init(void)
         if (is_uv4_hub())
                 ops = uv4_bau_ops;
         else if (is_uv3_hub())
-                ops = uv123_bau_ops;
+                ops = uv2_3_bau_ops;
         else if (is_uv2_hub())
-                ops = uv123_bau_ops;
+                ops = uv2_3_bau_ops;
         else if (is_uv1_hub())
-                ops = uv123_bau_ops;
+                ops = uv1_bau_ops;
 
         for_each_possible_cpu(cur_cpu) {
                 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);