Diffstat (limited to 'drivers/net/igb')

 drivers/net/igb/igb.h         |  13
 drivers/net/igb/igb_ethtool.c | 181
 drivers/net/igb/igb_main.c    | 213
 3 files changed, 220 insertions(+), 187 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3298f5a11dab..63abd1c0d75e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -59,10 +59,10 @@ struct igb_adapter;
 #define MAX_Q_VECTORS                  8

 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES              (adapter->vfs_allocated_count ? \
-                                       (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
-#define IGB_MAX_TX_QUEUES              IGB_MAX_RX_QUEUES
-#define IGB_ABS_MAX_TX_QUEUES          4
+#define IGB_MAX_RX_QUEUES              (adapter->vfs_allocated_count ? 2 : \
+                                       (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_ABS_MAX_TX_QUEUES          8
+#define IGB_MAX_TX_QUEUES              IGB_MAX_RX_QUEUES

 #define IGB_MAX_VF_MC_ENTRIES          30
 #define IGB_MAX_VF_FUNCTIONS           8
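Note on the hunk above: the PF's queue ceiling is no longer derived solely from the VF count. With VFs allocated the PF keeps 2 queues; without VFs, MACs newer than the 82575 (i.e. the 82576) get 8 RSS queues while the 82575 keeps 4, and the absolute tx-queue bound rises from 4 to 8 to match. A minimal user-space sketch of the same selection logic (the enum values and helper name are illustrative, not the driver's API):

#include <stdio.h>

/* Illustrative stand-ins for hw->mac.type values. */
enum mac_type { e1000_82575 = 0, e1000_82576 = 1 };

/* Mirrors the reworked IGB_MAX_RX_QUEUES expression. */
static int max_rx_queues(unsigned int vfs_allocated_count, enum mac_type type)
{
	return vfs_allocated_count ? 2 : (type > e1000_82575 ? 8 : 4);
}

int main(void)
{
	printf("82576, no VFs: %d\n", max_rx_queues(0, e1000_82576)); /* 8 */
	printf("82576, VFs:    %d\n", max_rx_queues(7, e1000_82576)); /* 2 */
	printf("82575, no VFs: %d\n", max_rx_queues(0, e1000_82575)); /* 4 */
	return 0;
}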
@@ -249,10 +249,6 @@ struct igb_adapter {
 	u16 link_speed;
 	u16 link_duplex;

-	unsigned int total_tx_bytes;
-	unsigned int total_tx_packets;
-	unsigned int total_rx_bytes;
-	unsigned int total_rx_packets;
 	/* Interrupt Throttle Rate */
 	u32 rx_itr_setting;
 	u32 tx_itr_setting;
@@ -315,6 +311,7 @@ struct igb_adapter {
 	u16 rx_ring_count;
 	unsigned int vfs_allocated_count;
 	struct vf_data_storage *vf_data;
+	u32 rss_queues;
 };

 #define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 90b89a81f669..c1cde5b44906 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -37,77 +37,88 @@

 #include "igb.h"

-enum {NETDEV_STATS, IGB_STATS};
-
 struct igb_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	int type;
 	int sizeof_stat;
 	int stat_offset;
 };

-#define IGB_STAT(m)		IGB_STATS, \
-				FIELD_SIZEOF(struct igb_adapter, m), \
-				offsetof(struct igb_adapter, m)
-#define IGB_NETDEV_STAT(m)	NETDEV_STATS, \
-				FIELD_SIZEOF(struct net_device, m), \
-				offsetof(struct net_device, m)
-
+#define IGB_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+	.stat_offset = offsetof(struct igb_adapter, _stat) \
+}
 static const struct igb_stats igb_gstrings_stats[] = {
-	{ "rx_packets", IGB_STAT(stats.gprc) },
-	{ "tx_packets", IGB_STAT(stats.gptc) },
-	{ "rx_bytes", IGB_STAT(stats.gorc) },
-	{ "tx_bytes", IGB_STAT(stats.gotc) },
-	{ "rx_broadcast", IGB_STAT(stats.bprc) },
-	{ "tx_broadcast", IGB_STAT(stats.bptc) },
-	{ "rx_multicast", IGB_STAT(stats.mprc) },
-	{ "tx_multicast", IGB_STAT(stats.mptc) },
-	{ "rx_errors", IGB_NETDEV_STAT(stats.rx_errors) },
-	{ "tx_errors", IGB_NETDEV_STAT(stats.tx_errors) },
-	{ "tx_dropped", IGB_NETDEV_STAT(stats.tx_dropped) },
-	{ "multicast", IGB_STAT(stats.mprc) },
-	{ "collisions", IGB_STAT(stats.colc) },
-	{ "rx_length_errors", IGB_NETDEV_STAT(stats.rx_length_errors) },
-	{ "rx_over_errors", IGB_NETDEV_STAT(stats.rx_over_errors) },
-	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
-	{ "rx_frame_errors", IGB_NETDEV_STAT(stats.rx_frame_errors) },
-	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
-	{ "rx_queue_drop_packet_count", IGB_NETDEV_STAT(stats.rx_fifo_errors) },
-	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
-	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
-	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
-	{ "tx_fifo_errors", IGB_NETDEV_STAT(stats.tx_fifo_errors) },
-	{ "tx_heartbeat_errors", IGB_NETDEV_STAT(stats.tx_heartbeat_errors) },
-	{ "tx_window_errors", IGB_STAT(stats.latecol) },
-	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
-	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
-	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
-	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
-	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
-	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
-	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
-	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
-	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
-	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
-	{ "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
-	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
-	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
-	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "tx_smbus", IGB_STAT(stats.mgptc) },
-	{ "rx_smbus", IGB_STAT(stats.mgprc) },
-	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
+	IGB_STAT("rx_packets", stats.gprc),
+	IGB_STAT("tx_packets", stats.gptc),
+	IGB_STAT("rx_bytes", stats.gorc),
+	IGB_STAT("tx_bytes", stats.gotc),
+	IGB_STAT("rx_broadcast", stats.bprc),
+	IGB_STAT("tx_broadcast", stats.bptc),
+	IGB_STAT("rx_multicast", stats.mprc),
+	IGB_STAT("tx_multicast", stats.mptc),
+	IGB_STAT("multicast", stats.mprc),
+	IGB_STAT("collisions", stats.colc),
+	IGB_STAT("rx_crc_errors", stats.crcerrs),
+	IGB_STAT("rx_no_buffer_count", stats.rnbc),
+	IGB_STAT("rx_missed_errors", stats.mpc),
+	IGB_STAT("tx_aborted_errors", stats.ecol),
+	IGB_STAT("tx_carrier_errors", stats.tncrs),
+	IGB_STAT("tx_window_errors", stats.latecol),
+	IGB_STAT("tx_abort_late_coll", stats.latecol),
+	IGB_STAT("tx_deferred_ok", stats.dc),
+	IGB_STAT("tx_single_coll_ok", stats.scc),
+	IGB_STAT("tx_multi_coll_ok", stats.mcc),
+	IGB_STAT("tx_timeout_count", tx_timeout_count),
+	IGB_STAT("rx_long_length_errors", stats.roc),
+	IGB_STAT("rx_short_length_errors", stats.ruc),
+	IGB_STAT("rx_align_errors", stats.algnerrc),
+	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+	IGB_STAT("tx_flow_control_xon", stats.xontxc),
+	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+	IGB_STAT("rx_long_byte_count", stats.gorc),
+	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+	IGB_STAT("tx_smbus", stats.mgptc),
+	IGB_STAT("rx_smbus", stats.mgprc),
+	IGB_STAT("dropped_smbus", stats.mgpdc),
+};
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+	.stat_string = __stringify(_net_stat), \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct igb_stats igb_gstrings_net_stats[] = {
+	IGB_NETDEV_STAT(rx_errors),
+	IGB_NETDEV_STAT(tx_errors),
+	IGB_NETDEV_STAT(tx_dropped),
+	IGB_NETDEV_STAT(rx_length_errors),
+	IGB_NETDEV_STAT(rx_over_errors),
+	IGB_NETDEV_STAT(rx_frame_errors),
+	IGB_NETDEV_STAT(rx_fifo_errors),
+	IGB_NETDEV_STAT(tx_fifo_errors),
+	IGB_NETDEV_STAT(tx_heartbeat_errors)
 };

+#define IGB_GLOBAL_STATS_LEN	\
+	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+#define IGB_NETDEV_STATS_LEN	\
+	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_RX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+#define IGB_TX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
 #define IGB_QUEUE_STATS_LEN \
 	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
-	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
+	  IGB_RX_QUEUE_STATS_LEN) + \
 	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
-	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
-#define IGB_GLOBAL_STATS_LEN	\
-	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
-#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
+	  IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test (offline)", "Eeprom test (offline)",
 	"Interrupt test (offline)", "Loopback test (offline)",
@@ -735,17 +746,17 @@ static int igb_set_ringparam(struct net_device *netdev,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *temp_ring;
 	int i, err = 0;
-	u32 new_rx_count, new_tx_count;
+	u16 new_rx_count, new_tx_count;

 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;

-	new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
-	new_rx_count = max(new_rx_count, (u32)IGB_MIN_RXD);
+	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
 	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

-	new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
-	new_tx_count = max(new_tx_count, (u32)IGB_MIN_TXD);
+	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

 	if ((new_tx_count == adapter->tx_ring_count) &&
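Note on the hunk above: new_rx_count/new_tx_count become u16, so the open-coded min()/max() casts are replaced by min_t()/max_t() with explicit types; the requested size is clamped into the supported range and rounded up to the descriptor multiple. A rough user-space equivalent (the bounds and multiple below are assumptions for illustration; the real constants live in igb.h):

#include <stdio.h>
#include <stdint.h>

/* Illustrative bounds; the real IGB_MIN/MAX_RXD values come from igb.h. */
#define DEMO_MIN_RXD     80u
#define DEMO_MAX_RXD     4096u
#define DEMO_RX_MULTIPLE 8u

/* Round up to the next descriptor multiple, as ALIGN() does. */
#define DEMO_ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

static uint16_t clamp_ring_size(uint32_t requested)
{
	uint16_t count;

	count = requested < DEMO_MAX_RXD ? requested : DEMO_MAX_RXD; /* min_t(u32, ...) */
	count = count > DEMO_MIN_RXD ? count : DEMO_MIN_RXD;         /* max_t(u16, ...) */
	return DEMO_ALIGN(count, DEMO_RX_MULTIPLE);
}

int main(void)
{
	printf("100000 -> %u\n", (unsigned)clamp_ring_size(100000)); /* clamped to 4096 */
	printf("3      -> %u\n", (unsigned)clamp_ring_size(3));      /* raised to 80 */
	printf("1000   -> %u\n", (unsigned)clamp_ring_size(1000));   /* already aligned */
	return 0;
}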
@@ -1922,43 +1933,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
 	u64 *queue_stat;
-	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
-	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
-	int j;
-	int i;
-	char *p = NULL;
+	int i, j, k;
+	char *p;

 	igb_update_stats(adapter);

 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
-		switch (igb_gstrings_stats[i].type) {
-		case NETDEV_STATS:
-			p = (char *) netdev +
-					igb_gstrings_stats[i].stat_offset;
-			break;
-		case IGB_STATS:
-			p = (char *) adapter +
-					igb_gstrings_stats[i].stat_offset;
-			break;
-		}
-
+		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
 		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+	for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+		data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		int k;
 		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
-		for (k = 0; k < stat_count_tx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		int k;
 		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
-		for (k = 0; k < stat_count_rx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 }

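Note on the hunk above: data[] is now filled in three fixed sections, adapter stats, then net_device stats, then per-queue tx/rx counters, with one running index i threaded through every loop via the k++, i++ idiom. The ordering must match igb_get_strings() exactly or ethtool will label the wrong values. A sketch of the layout arithmetic (the per-queue counter counts are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	int global_len = 35;   /* entries in igb_gstrings_stats above */
	int netdev_len = 9;    /* entries in igb_gstrings_net_stats */
	int tx_queues = 4, rx_queues = 4;
	int per_tx = 2, per_rx = 3; /* u64 counters per queue ring, assumed */

	int queue_len = tx_queues * per_tx + rx_queues * per_rx;

	printf("net_device stats begin at data[%d]\n", global_len);
	printf("queue stats begin at data[%d]\n", global_len + netdev_len);
	printf("IGB_STATS_LEN = %d\n", global_len + netdev_len + queue_len);
	return 0;
}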
@@ -1979,6 +1979,11 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+			memcpy(p, igb_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			sprintf(p, "tx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index b044c985df0b..0cab5e2b0894 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -296,10 +296,10 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 		 * and continue consuming queues in the same sequence
 		 */
 		if (adapter->vfs_allocated_count) {
-			for (; i < adapter->num_rx_queues; i++)
+			for (; i < adapter->rss_queues; i++)
 				adapter->rx_ring[i].reg_idx = rbase_offset +
 				                              Q_IDX_82576(i);
-			for (; j < adapter->num_tx_queues; j++)
+			for (; j < adapter->rss_queues; j++)
 				adapter->tx_ring[j].reg_idx = rbase_offset +
 				                              Q_IDX_82576(j);
 		}
@@ -618,14 +618,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;

 	/* Number of supported queues. */
-	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
-	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;

 	/* start with one vector for every rx queue */
 	numvecs = adapter->num_rx_queues;

 	/* if tx handler is seperate add 1 for every tx queue */
-	numvecs += adapter->num_tx_queues;
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;

 	/* store the number of vectors reserved for queues */
 	adapter->num_q_vectors = numvecs;
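Note on the hunk above: queue counts now come straight from rss_queues, and when IGB_FLAG_QUEUE_PAIRS is set a tx ring shares its q_vector, and therefore its MSI-X vector, with the matching rx ring, so tx queues stop contributing to numvecs. A quick sketch of the vector budget; the trailing +1 mirrors the extra vector this driver reserves for link/other interrupts (the helper name is illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative: vectors = rx queues, plus tx queues when unpaired,
 * plus one vector for link-state/"other" causes. */
static int msix_vectors_needed(int rss_queues, bool queue_pairs)
{
	int numvecs = rss_queues;              /* one per rx queue */

	if (!queue_pairs)
		numvecs += rss_queues;         /* one per separate tx queue */
	return numvecs + 1;                    /* link/other cause */
}

int main(void)
{
	printf("8 queues, paired:   %d vectors\n", msix_vectors_needed(8, true));  /* 9 */
	printf("8 queues, unpaired: %d vectors\n", msix_vectors_needed(8, false)); /* 17 */
	return 0;
}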
@@ -666,6 +667,7 @@ msi_only:
 	}
 #endif
 	adapter->vfs_allocated_count = 0;
+	adapter->rss_queues = 1;
 	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
@@ -1566,56 +1568,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	}

 #endif
-	switch (hw->mac.type) {
-	case e1000_82576:
-		/*
-		 * Initialize hardware timer: we keep it running just in case
-		 * that some program needs it later on.
-		 */
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/**
-		 * Scale the NIC clock cycle by a large factor so that
-		 * relatively small clock corrections can be added or
-		 * substracted at each clock tick. The drawbacks of a large
-		 * factor are a) that the clock register overflows more quickly
-		 * (not such a big deal) and b) that the increment per tick has
-		 * to fit into 24 bits. As a result we need to use a shift of
-		 * 19 so we can fit a value of 16 into the TIMINCA register.
-		 */
-		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
-		wr32(E1000_TIMINCA,
-		     (1 << E1000_TIMINCA_16NS_SHIFT) |
-		     (16 << IGB_82576_TSYNC_SHIFT));
-
-		/* Set registers so that rollover occurs soon to test this. */
-		wr32(E1000_SYSTIML, 0x00000000);
-		wr32(E1000_SYSTIMH, 0xFF800000);
-		wrfl();
-
-		timecounter_init(&adapter->clock,
-				 &adapter->cycles,
-				 ktime_to_ns(ktime_get_real()));
-		/*
-		 * Synchronize our NIC clock against system wall clock. NIC
-		 * time stamp reading requires ~3us per sample, each sample
-		 * was pretty stable even under load => only require 10
-		 * samples for each offset comparison.
-		 */
-		memset(&adapter->compare, 0, sizeof(adapter->compare));
-		adapter->compare.source = &adapter->clock;
-		adapter->compare.target = ktime_get_real;
-		adapter->compare.num_samples = 10;
-		timecompare_update(&adapter->compare, 0);
-		break;
-	case e1000_82575:
-		/* 82575 does not support timesync */
-	default:
-		break;
-	}
-
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1781,6 +1733,70 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 #endif /* CONFIG_PCI_IOV */
 }

+
+/**
+ * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
+ * @adapter: board private structure to initialize
+ *
+ * igb_init_hw_timer initializes the function pointer and values for the hw
+ * timer found in hardware.
+ **/
+static void igb_init_hw_timer(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		/*
+		 * Initialize hardware timer: we keep it running just in case
+		 * that some program needs it later on.
+		 */
+		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+		adapter->cycles.read = igb_read_clock;
+		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+		adapter->cycles.mult = 1;
+		/**
+		 * Scale the NIC clock cycle by a large factor so that
+		 * relatively small clock corrections can be added or
+		 * substracted at each clock tick. The drawbacks of a large
+		 * factor are a) that the clock register overflows more quickly
+		 * (not such a big deal) and b) that the increment per tick has
+		 * to fit into 24 bits. As a result we need to use a shift of
+		 * 19 so we can fit a value of 16 into the TIMINCA register.
+		 */
+		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
+		wr32(E1000_TIMINCA,
+		     (1 << E1000_TIMINCA_16NS_SHIFT) |
+		     (16 << IGB_82576_TSYNC_SHIFT));
+
+		/* Set registers so that rollover occurs soon to test this. */
+		wr32(E1000_SYSTIML, 0x00000000);
+		wr32(E1000_SYSTIMH, 0xFF800000);
+		wrfl();
+
+		timecounter_init(&adapter->clock,
+				 &adapter->cycles,
+				 ktime_to_ns(ktime_get_real()));
+		/*
+		 * Synchronize our NIC clock against system wall clock. NIC
+		 * time stamp reading requires ~3us per sample, each sample
+		 * was pretty stable even under load => only require 10
+		 * samples for each offset comparison.
+		 */
+		memset(&adapter->compare, 0, sizeof(adapter->compare));
+		adapter->compare.source = &adapter->clock;
+		adapter->compare.target = ktime_get_real;
+		adapter->compare.num_samples = 10;
+		timecompare_update(&adapter->compare, 0);
+		break;
+	case e1000_82575:
+		/* 82575 does not support timesync */
+	default:
+		break;
+	}
+
+}
+
 /**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
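Note on the comment block carried over above: with mult = 1 and shift = IGB_82576_TSYNC_SHIFT (19), the timecounter converts raw SYSTIM cycles with the standard cyclecounter formula ns = (cycles * mult) >> shift, and the companion increment of 16 written to TIMINCA must fit that register's 24-bit increment field. A small worked check of both statements (constants taken from the comment, not from the hardware datasheet):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Values from the comment: mult = 1, shift = 19, increment = 16. */
	uint64_t mult = 1, shift = 19;
	uint32_t timinca_increment = 16;

	/* The per-tick increment must fit TIMINCA's 24-bit field. */
	printf("increment %u fits in 24 bits: %d\n",
	       timinca_increment, timinca_increment < (1u << 24));

	/* Cyclecounter conversion: ns = (cycles * mult) >> shift. */
	uint64_t cycles = 52428800; /* arbitrary sample */
	printf("%llu cycles -> %llu scaled ns\n",
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}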
@@ -1810,12 +1826,24 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->vfs_allocated_count = max_vfs;

 #endif /* CONFIG_PCI_IOV */
+	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+
+	/*
+	 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
+	 * then we should combine the queues into a queue pair in order to
+	 * conserve interrupts due to limited supply
+	 */
+	if ((adapter->rss_queues > 4) ||
+	    ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
+		adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+
 	/* This call may decrease the number of queues */
 	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}

+	igb_init_hw_timer(adapter);
 	igb_probe_vfs(adapter);

 	/* Explicitly disable IRQ since the NIC can be in any state. */
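Note on the hunk above: rss_queues is now computed once in igb_sw_init(), capped by both the hardware maximum and the number of online CPUs, and everything downstream reads it instead of re-deriving queue counts. Queue pairing kicks in when interrupt demand would outrun the MSI-X supply; a sketch of the decision with the thresholds copied from the hunk (the helper name is illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the condition added to igb_sw_init(): pair tx/rx queues when
 * more than 4 RSS queues are in play, or when VFs consume most of the
 * queue/vector budget while the PF still runs multiqueue. */
static bool should_pair_queues(unsigned int rss_queues, unsigned int vfs)
{
	return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
}

int main(void)
{
	printf("8 queues, 0 VFs: %d\n", should_pair_queues(8, 0)); /* 1: pair */
	printf("4 queues, 0 VFs: %d\n", should_pair_queues(4, 0)); /* 0 */
	printf("2 queues, 7 VFs: %d\n", should_pair_queues(2, 7)); /* 1: pair */
	return 0;
}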
@@ -2000,7 +2028,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 		}
 	}

-	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
+	for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
 		int r_idx = i % adapter->num_tx_queues;
 		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
 	}
@@ -2184,7 +2212,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 		array_wr32(E1000_RSSRK(0), j, rsskey);
 	}

-	num_rx_queues = adapter->num_rx_queues;
+	num_rx_queues = adapter->rss_queues;

 	if (adapter->vfs_allocated_count) {
 		/* 82575 and 82576 supports 2 RSS queues for VMDq */
@@ -2240,7 +2268,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 			E1000_VT_CTL_DEFAULT_POOL_SHIFT;
 		wr32(E1000_VT_CTL, vtctl);
 	}
-	if (adapter->num_rx_queues > 1)
+	if (adapter->rss_queues > 1)
 		mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
 	else
 		mrqc = E1000_MRQC_ENABLE_VMDQ;
@@ -2370,7 +2398,7 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
 	/* clear all bits that might not be set */
 	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

-	if (adapter->num_rx_queues > 1 && vfn == adapter->vfs_allocated_count)
+	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
 		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
 	/*
 	 * for VMDq only allow the VFs and pool 0 to accept broadcast and
@@ -2915,7 +2943,6 @@ static void igb_watchdog_task(struct work_struct *work)
 						   watchdog_task);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	struct igb_ring *tx_ring = adapter->tx_ring;
 	u32 link;
 	int i;

@@ -2985,22 +3012,24 @@ static void igb_watchdog_task(struct work_struct *work)
 	igb_update_stats(adapter);
 	igb_update_adaptive(hw);

-	if (!netif_carrier_ok(netdev)) {
-		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = &adapter->tx_ring[i];
+		if (!netif_carrier_ok(netdev)) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
 			 * (Do the reset outside of interrupt context). */
-			adapter->tx_timeout_count++;
-			schedule_work(&adapter->reset_task);
-			/* return immediately since reset is imminent */
-			return;
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
 		}
-	}

 	/* Force detection of hung controller every watchdog period */
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		adapter->tx_ring[i].detect_tx_hung = true;
+		tx_ring->detect_tx_hung = true;
+	}

 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
@@ -3761,7 +3790,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)

 void igb_update_stats(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
+	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 rnbc;
@@ -3785,13 +3814,13 @@ void igb_update_stats(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
-		netdev->stats.rx_fifo_errors += rqdpc_tmp;
+		net_stats->rx_fifo_errors += rqdpc_tmp;
 		bytes += adapter->rx_ring[i].rx_stats.bytes;
 		packets += adapter->rx_ring[i].rx_stats.packets;
 	}

-	netdev->stats.rx_bytes = bytes;
-	netdev->stats.rx_packets = packets;
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;

 	bytes = 0;
 	packets = 0;
@@ -3799,8 +3828,8 @@ void igb_update_stats(struct igb_adapter *adapter)
 		bytes += adapter->tx_ring[i].tx_stats.bytes;
 		packets += adapter->tx_ring[i].tx_stats.packets;
 	}
-	netdev->stats.tx_bytes = bytes;
-	netdev->stats.tx_packets = packets;
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;

 	/* read stats registers */
 	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
@@ -3837,7 +3866,7 @@ void igb_update_stats(struct igb_adapter *adapter)
 	rd32(E1000_GOTCH); /* clear GOTCL */
 	rnbc = rd32(E1000_RNBC);
 	adapter->stats.rnbc += rnbc;
-	netdev->stats.rx_fifo_errors += rnbc;
+	net_stats->rx_fifo_errors += rnbc;
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -3878,29 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
 	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

 	/* Fill out the OS statistics structure */
-	netdev->stats.multicast = adapter->stats.mprc;
-	netdev->stats.collisions = adapter->stats.colc;
+	net_stats->multicast = adapter->stats.mprc;
+	net_stats->collisions = adapter->stats.colc;

 	/* Rx Errors */

 	/* RLEC on some newer hardware can be incorrect so build
 	 * our own version based on RUC and ROC */
-	netdev->stats.rx_errors = adapter->stats.rxerrc +
+	net_stats->rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
-	netdev->stats.rx_length_errors = adapter->stats.ruc +
+	net_stats->rx_length_errors = adapter->stats.ruc +
 		adapter->stats.roc;
-	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
-	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
-	netdev->stats.rx_missed_errors = adapter->stats.mpc;
+	net_stats->rx_crc_errors = adapter->stats.crcerrs;
+	net_stats->rx_frame_errors = adapter->stats.algnerrc;
+	net_stats->rx_missed_errors = adapter->stats.mpc;

 	/* Tx Errors */
-	netdev->stats.tx_errors = adapter->stats.ecol +
+	net_stats->tx_errors = adapter->stats.ecol +
 		adapter->stats.latecol;
-	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
-	netdev->stats.tx_window_errors = adapter->stats.latecol;
-	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
+	net_stats->tx_aborted_errors = adapter->stats.ecol;
+	net_stats->tx_window_errors = adapter->stats.latecol;
+	net_stats->tx_carrier_errors = adapter->stats.tncrs;

 	/* Tx Dropped needs to be maintained elsewhere */

@@ -4923,6 +4952,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 	struct sk_buff *skb;
 	bool cleaned = false;
 	int cleaned_count = 0;
+	int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
 	unsigned int i;
 	u32 staterr;
@@ -4977,7 +5007,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 						buffer_info->page_offset,
 						length);

-		if (page_count(buffer_info->page) != 1)
+		if ((page_count(buffer_info->page) != 1) ||
+		    (page_to_nid(buffer_info->page) != current_node))
 			buffer_info->page = NULL;
 		else
 			get_page(buffer_info->page);
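Note on the final hunk: the rx page-recycling test now requires not only that the driver hold the sole reference to the half-page (page_count() == 1) but also that the page live on the NUMA node the cleanup routine is running on, so recycled receive buffers stay local to the consuming CPU. A user-space sketch of the decision (struct demo_page and the helpers are illustrative stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for the bits of struct page this test consults. */
struct demo_page {
	int refcount;  /* what page_count() would return */
	int nid;       /* what page_to_nid() would return */
};

/* Recycle only pages we exclusively own that are local to this node. */
static bool can_recycle(const struct demo_page *page, int current_node)
{
	return page->refcount == 1 && page->nid == current_node;
}

int main(void)
{
	struct demo_page local_owned  = { .refcount = 1, .nid = 0 };
	struct demo_page remote_owned = { .refcount = 1, .nid = 1 };
	struct demo_page local_shared = { .refcount = 2, .nid = 0 };

	printf("local, sole ref:  %d\n", can_recycle(&local_owned, 0));  /* 1 */
	printf("remote, sole ref: %d\n", can_recycle(&remote_owned, 0)); /* 0 */
	printf("local, shared:    %d\n", can_recycle(&local_shared, 0)); /* 0 */
	return 0;
}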