aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-06-22 19:43:01 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-06-22 19:43:01 -0400
commitb3ba283d831fed464a1f9c18e7ee82b020ab1a1e (patch)
treede8e90034536ce5eff57193113ab537dac3777c6
parentd43e4f44ba47cace184c78f99723d80dea3e22e0 (diff)
parentcc2749e4095cbbcb35518fb2db5e926b85c3f25f (diff)
Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 CPU features from Ingo Molnar: "Various CPU feature support related changes: in particular the /proc/cpuinfo model name sanitization change should be monitored, it has a chance to break stuff. (but really shouldn't and there are no regression reports)" * 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/cpu/amd: Give access to the number of nodes in a physical package x86/cpu: Trim model ID whitespace x86/cpu: Strip any /proc/cpuinfo model name field whitespace x86/cpu/amd: Set X86_FEATURE_EXTD_APICID for future processors x86/gart: Check for GART support before accessing GART registers
-rw-r--r--arch/x86/include/asm/amd_nb.h11
-rw-r--r--arch/x86/include/asm/processor.h1
-rw-r--r--arch/x86/kernel/amd_nb.c4
-rw-r--r--arch/x86/kernel/aperture_64.c8
-rw-r--r--arch/x86/kernel/cpu/amd.c35
-rw-r--r--arch/x86/kernel/cpu/common.c27
6 files changed, 63 insertions, 23 deletions
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index aaac3b2fb746..1a5da2e63aee 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -98,11 +98,22 @@ static inline u16 amd_get_node_id(struct pci_dev *pdev)
98 return 0; 98 return 0;
99} 99}
100 100
101static inline bool amd_gart_present(void)
102{
103	/* GART present on Fam fh/10h, and on Fam15h up to model 0fh */
104 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
105 (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
106 return true;
107
108 return false;
109}
110
101#else 111#else
102 112
103#define amd_nb_num(x) 0 113#define amd_nb_num(x) 0
104#define amd_nb_has_feature(x) false 114#define amd_nb_has_feature(x) false
105#define node_to_amd_nb(x) NULL 115#define node_to_amd_nb(x) NULL
116#define amd_gart_present(x) false
106 117
107#endif 118#endif
108 119
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 23ba6765b718..9aa52fd13a78 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -946,6 +946,7 @@ static inline int mpx_disable_management(struct task_struct *tsk)
946#endif /* CONFIG_X86_INTEL_MPX */ 946#endif /* CONFIG_X86_INTEL_MPX */
947 947
948extern u16 amd_get_nb_id(int cpu); 948extern u16 amd_get_nb_id(int cpu);
949extern u32 amd_get_nodes_per_socket(void);
949 950
950static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) 951static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
951{ 952{
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 5caed1dd7ccf..29fa475ec518 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -89,9 +89,7 @@ int amd_cache_northbridges(void)
89 next_northbridge(link, amd_nb_link_ids); 89 next_northbridge(link, amd_nb_link_ids);
90 } 90 }
91 91
92 /* GART present only on Fam15h upto model 0fh */ 92 if (amd_gart_present())
93 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
94 (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
95 amd_northbridges.flags |= AMD_NB_GART; 93 amd_northbridges.flags |= AMD_NB_GART;
96 94
97 /* 95 /*
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 76164e173a24..6e85f713641d 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -262,6 +262,9 @@ void __init early_gart_iommu_check(void)
262 u64 aper_base = 0, last_aper_base = 0; 262 u64 aper_base = 0, last_aper_base = 0;
263 int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0; 263 int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0;
264 264
265 if (!amd_gart_present())
266 return;
267
265 if (!early_pci_allowed()) 268 if (!early_pci_allowed())
266 return; 269 return;
267 270
@@ -355,6 +358,9 @@ int __init gart_iommu_hole_init(void)
355 int fix, slot, valid_agp = 0; 358 int fix, slot, valid_agp = 0;
356 int i, node; 359 int i, node;
357 360
361 if (!amd_gart_present())
362 return -ENODEV;
363
358 if (gart_iommu_aperture_disabled || !fix_aperture || 364 if (gart_iommu_aperture_disabled || !fix_aperture ||
359 !early_pci_allowed()) 365 !early_pci_allowed())
360 return -ENODEV; 366 return -ENODEV;
@@ -452,7 +458,7 @@ out:
452 force_iommu || 458 force_iommu ||
453 valid_agp || 459 valid_agp ||
454 fallback_aper_force) { 460 fallback_aper_force) {
455 pr_info("Your BIOS doesn't leave a aperture memory hole\n"); 461 pr_info("Your BIOS doesn't leave an aperture memory hole\n");
456 pr_info("Please enable the IOMMU option in the BIOS setup\n"); 462 pr_info("Please enable the IOMMU option in the BIOS setup\n");
457 pr_info("This costs you %dMB of RAM\n", 463 pr_info("This costs you %dMB of RAM\n",
458 32 << fallback_aper_order); 464 32 << fallback_aper_order);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e4cf63301ff4..56cae1964a81 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,13 @@
19 19
20#include "cpu.h" 20#include "cpu.h"
21 21
22/*
23 * nodes_per_socket: Stores the number of nodes per socket.
24 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
25 * Node Identifiers[10:8]
26 */
27static u32 nodes_per_socket = 1;
28
22static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) 29static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
23{ 30{
24 u32 gprs[8] = { 0 }; 31 u32 gprs[8] = { 0 };
@@ -291,7 +298,7 @@ static int nearby_node(int apicid)
291#ifdef CONFIG_X86_HT 298#ifdef CONFIG_X86_HT
292static void amd_get_topology(struct cpuinfo_x86 *c) 299static void amd_get_topology(struct cpuinfo_x86 *c)
293{ 300{
294 u32 nodes, cores_per_cu = 1; 301 u32 cores_per_cu = 1;
295 u8 node_id; 302 u8 node_id;
296 int cpu = smp_processor_id(); 303 int cpu = smp_processor_id();
297 304
@@ -300,7 +307,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
300 u32 eax, ebx, ecx, edx; 307 u32 eax, ebx, ecx, edx;
301 308
302 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); 309 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
303 nodes = ((ecx >> 8) & 7) + 1; 310 nodes_per_socket = ((ecx >> 8) & 7) + 1;
304 node_id = ecx & 7; 311 node_id = ecx & 7;
305 312
306 /* get compute unit information */ 313 /* get compute unit information */
@@ -311,18 +318,18 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
311 u64 value; 318 u64 value;
312 319
313 rdmsrl(MSR_FAM10H_NODE_ID, value); 320 rdmsrl(MSR_FAM10H_NODE_ID, value);
314 nodes = ((value >> 3) & 7) + 1; 321 nodes_per_socket = ((value >> 3) & 7) + 1;
315 node_id = value & 7; 322 node_id = value & 7;
316 } else 323 } else
317 return; 324 return;
318 325
319 /* fixup multi-node processor information */ 326 /* fixup multi-node processor information */
320 if (nodes > 1) { 327 if (nodes_per_socket > 1) {
321 u32 cores_per_node; 328 u32 cores_per_node;
322 u32 cus_per_node; 329 u32 cus_per_node;
323 330
324 set_cpu_cap(c, X86_FEATURE_AMD_DCM); 331 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
325 cores_per_node = c->x86_max_cores / nodes; 332 cores_per_node = c->x86_max_cores / nodes_per_socket;
326 cus_per_node = cores_per_node / cores_per_cu; 333 cus_per_node = cores_per_node / cores_per_cu;
327 334
328 /* store NodeID, use llc_shared_map to store sibling info */ 335 /* store NodeID, use llc_shared_map to store sibling info */
@@ -366,6 +373,12 @@ u16 amd_get_nb_id(int cpu)
366} 373}
367EXPORT_SYMBOL_GPL(amd_get_nb_id); 374EXPORT_SYMBOL_GPL(amd_get_nb_id);
368 375
376u32 amd_get_nodes_per_socket(void)
377{
378 return nodes_per_socket;
379}
380EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
381
369static void srat_detect_node(struct cpuinfo_x86 *c) 382static void srat_detect_node(struct cpuinfo_x86 *c)
370{ 383{
371#ifdef CONFIG_NUMA 384#ifdef CONFIG_NUMA
@@ -520,8 +533,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
520 set_cpu_cap(c, X86_FEATURE_K6_MTRR); 533 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
521#endif 534#endif
522#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) 535#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
523 /* check CPU config space for extended APIC ID */ 536 /*
524 if (cpu_has_apic && c->x86 >= 0xf) { 537 * ApicID can always be treated as an 8-bit value for AMD APIC versions
538 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
539 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
540 * after 16h.
541 */
542 if (cpu_has_apic && c->x86 > 0x16) {
543 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
544 } else if (cpu_has_apic && c->x86 >= 0xf) {
545 /* check CPU config space for extended APIC ID */
525 unsigned int val; 546 unsigned int val;
526 val = read_pci_config(0, 24, 0, 0x68); 547 val = read_pci_config(0, 24, 0, 0x68);
527 if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) 548 if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a62cf04dac8a..351197cbbc8e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -5,6 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/percpu.h> 6#include <linux/percpu.h>
7#include <linux/string.h> 7#include <linux/string.h>
8#include <linux/ctype.h>
8#include <linux/delay.h> 9#include <linux/delay.h>
9#include <linux/sched.h> 10#include <linux/sched.h>
10#include <linux/init.h> 11#include <linux/init.h>
@@ -419,7 +420,7 @@ static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
419static void get_model_name(struct cpuinfo_x86 *c) 420static void get_model_name(struct cpuinfo_x86 *c)
420{ 421{
421 unsigned int *v; 422 unsigned int *v;
422 char *p, *q; 423 char *p, *q, *s;
423 424
424 if (c->extended_cpuid_level < 0x80000004) 425 if (c->extended_cpuid_level < 0x80000004)
425 return; 426 return;
@@ -430,19 +431,21 @@ static void get_model_name(struct cpuinfo_x86 *c)
430 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); 431 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
431 c->x86_model_id[48] = 0; 432 c->x86_model_id[48] = 0;
432 433
433 /* 434 /* Trim whitespace */
434 * Intel chips right-justify this string for some dumb reason; 435 p = q = s = &c->x86_model_id[0];
435 * undo that brain damage: 436
436 */
437 p = q = &c->x86_model_id[0];
438 while (*p == ' ') 437 while (*p == ' ')
439 p++; 438 p++;
440 if (p != q) { 439
441 while (*p) 440 while (*p) {
442 *q++ = *p++; 441 /* Note the last non-whitespace index */
443 while (q <= &c->x86_model_id[48]) 442 if (!isspace(*p))
444 *q++ = '\0'; /* Zero-pad the rest */ 443 s = q;
444
445 *q++ = *p++;
445 } 446 }
447
448 *(s + 1) = '\0';
446} 449}
447 450
448void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) 451void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
@@ -1122,7 +1125,7 @@ void print_cpu_info(struct cpuinfo_x86 *c)
1122 printk(KERN_CONT "%s ", vendor); 1125 printk(KERN_CONT "%s ", vendor);
1123 1126
1124 if (c->x86_model_id[0]) 1127 if (c->x86_model_id[0])
1125 printk(KERN_CONT "%s", strim(c->x86_model_id)); 1128 printk(KERN_CONT "%s", c->x86_model_id);
1126 else 1129 else
1127 printk(KERN_CONT "%d86", c->x86); 1130 printk(KERN_CONT "%d86", c->x86);
1128 1131
g ioaddr, int eep_addr); static int mii_wait_link (struct net_device *dev, int wait); static int mii_set_media (struct net_device *dev); static int mii_get_media (struct net_device *dev); static int mii_set_media_pcs (struct net_device *dev); static int mii_get_media_pcs (struct net_device *dev); static int mii_read (struct net_device *dev, int phy_addr, int reg_num); static int mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data); static const struct ethtool_ops ethtool_ops; static const struct net_device_ops netdev_ops = { .ndo_open = rio_open, .ndo_start_xmit = start_xmit, .ndo_stop = rio_close, .ndo_get_stats = get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_multicast_list = set_multicast, .ndo_do_ioctl = rio_ioctl, .ndo_tx_timeout = rio_tx_timeout, .ndo_change_mtu = change_mtu, }; static int __devinit rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int err, irq; long ioaddr; static int version_printed; void *ring_space; dma_addr_t ring_dma; if (!version_printed++) printk ("%s", version); err = pci_enable_device (pdev); if (err) return err; irq = pdev->irq; err = pci_request_regions (pdev, "dl2k"); if (err) goto err_out_disable; pci_set_master (pdev); dev = alloc_etherdev (sizeof (*np)); if (!dev) { err = -ENOMEM; goto err_out_res; } SET_NETDEV_DEV(dev, &pdev->dev); #ifdef MEM_MAPPING ioaddr = pci_resource_start (pdev, 1); ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); if (!ioaddr) { err = -ENOMEM; goto err_out_dev; } #else ioaddr = pci_resource_start (pdev, 0); #endif dev->base_addr = ioaddr; dev->irq = irq; np = netdev_priv(dev); np->chip_id = chip_idx; np->pdev = pdev; spin_lock_init (&np->tx_lock); spin_lock_init (&np->rx_lock); /* Parse manual configuration */ np->an_enable = 1; np->tx_coalesce = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { 
np->an_enable = 0; if (strcmp (media[card_idx], "auto") == 0 || strcmp (media[card_idx], "autosense") == 0 || strcmp (media[card_idx], "0") == 0 ) { np->an_enable = 2; } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->full_duplex = 0; } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || strcmp (media[card_idx], "6") == 0) { np->speed=1000; np->full_duplex=1; } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || strcmp (media[card_idx], "5") == 0) { np->speed = 1000; np->full_duplex = 0; } else { np->an_enable = 1; } } if (jumbo[card_idx] != 0) { np->jumbo = 1; dev->mtu = MAX_JUMBO; } else { np->jumbo = 0; if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) dev->mtu = mtu[card_idx]; } np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? vlan[card_idx] : 0; if (rx_coalesce > 0 && rx_timeout > 0) { np->rx_coalesce = rx_coalesce; np->rx_timeout = rx_timeout; np->coalesce = 1; } np->tx_flow = (tx_flow == 0) ? 0 : 1; np->rx_flow = (rx_flow == 0) ? 
0 : 1; if (tx_coalesce < 1) tx_coalesce = 1; else if (tx_coalesce > TX_RING_SIZE-1) tx_coalesce = TX_RING_SIZE - 1; } dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &ethtool_ops); #if 0 dev->features = NETIF_F_IP_CSUM; #endif pci_set_drvdata (pdev, dev); ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_iounmap; np->tx_ring = (struct netdev_desc *) ring_space; np->tx_ring_dma = ring_dma; ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = (struct netdev_desc *) ring_space; np->rx_ring_dma = ring_dma; /* Parse eeprom data */ parse_eeprom (dev); /* Find PHY address */ err = find_miiphy (dev); if (err) goto err_out_unmap_rx; /* Fiber device? */ np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { /* default Auto-Negotiation for fiber deivices */ if (np->an_enable == 2) { np->an_enable = 1; } mii_set_media_pcs (dev); } else { /* Auto-Negotiation is mandatory for 1000BASE-T, IEEE 802.3ab Annex 28D page 14 */ if (np->speed == 1000) np->an_enable = 1; mii_set_media (dev); } err = register_netdev (dev); if (err)