diff options
123 files changed, 996 insertions, 543 deletions
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064 new file mode 100644 index 000000000000..41728999e142 --- /dev/null +++ b/Documentation/hwmon/max16064 | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | Kernel driver max16064 | ||
| 2 | ====================== | ||
| 3 | |||
| 4 | Supported chips: | ||
| 5 | * Maxim MAX16064 | ||
| 6 | Prefix: 'max16064' | ||
| 7 | Addresses scanned: - | ||
| 8 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf | ||
| 9 | |||
| 10 | Author: Guenter Roeck <guenter.roeck@ericsson.com> | ||
| 11 | |||
| 12 | |||
| 13 | Description | ||
| 14 | ----------- | ||
| 15 | |||
| 16 | This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply | ||
| 17 | Controller with Active-Voltage Output Control and PMBus Interface. | ||
| 18 | |||
| 19 | The driver is a client driver to the core PMBus driver. | ||
| 20 | Please see Documentation/hwmon/pmbus for details on PMBus client drivers. | ||
| 21 | |||
| 22 | |||
| 23 | Usage Notes | ||
| 24 | ----------- | ||
| 25 | |||
| 26 | This driver does not auto-detect devices. You will have to instantiate the | ||
| 27 | devices explicitly. Please see Documentation/i2c/instantiating-devices for | ||
| 28 | details. | ||
| 29 | |||
| 30 | |||
| 31 | Platform data support | ||
| 32 | --------------------- | ||
| 33 | |||
| 34 | The driver supports standard PMBus driver platform data. | ||
| 35 | |||
| 36 | |||
| 37 | Sysfs entries | ||
| 38 | ------------- | ||
| 39 | |||
| 40 | The following attributes are supported. Limits are read-write; all other | ||
| 41 | attributes are read-only. | ||
| 42 | |||
| 43 | in[1-4]_label "vout[1-4]" | ||
| 44 | in[1-4]_input Measured voltage. From READ_VOUT register. | ||
| 45 | in[1-4]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register. | ||
| 46 | in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. | ||
| 47 | in[1-4]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register. | ||
| 48 | in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. | ||
| 49 | in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. | ||
| 50 | in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. | ||
| 51 | in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. | ||
| 52 | in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. | ||
| 53 | |||
| 54 | temp1_input Measured temperature. From READ_TEMPERATURE_1 register. | ||
| 55 | temp1_max Maximum temperature. From OT_WARN_LIMIT register. | ||
| 56 | temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. | ||
| 57 | temp1_max_alarm Chip temperature high alarm. Set by comparing | ||
| 58 | READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING | ||
| 59 | status is set. | ||
| 60 | temp1_crit_alarm Chip temperature critical high alarm. Set by comparing | ||
| 61 | READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT | ||
| 62 | status is set. | ||
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440 new file mode 100644 index 000000000000..6c525dd07d59 --- /dev/null +++ b/Documentation/hwmon/max34440 | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | Kernel driver max34440 | ||
| 2 | ====================== | ||
| 3 | |||
| 4 | Supported chips: | ||
| 5 | * Maxim MAX34440 | ||
| 6 | Prefixes: 'max34440' | ||
| 7 | Addresses scanned: - | ||
| 8 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf | ||
| 9 | * Maxim MAX34441 | ||
| 10 | PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller | ||
| 11 | Prefixes: 'max34441' | ||
| 12 | Addresses scanned: - | ||
| 13 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf | ||
| 14 | |||
| 15 | Author: Guenter Roeck <guenter.roeck@ericsson.com> | ||
| 16 | |||
| 17 | |||
| 18 | Description | ||
| 19 | ----------- | ||
| 20 | |||
| 21 | This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel | ||
| 22 | Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager | ||
| 23 | and Intelligent Fan Controller. | ||
| 24 | |||
| 25 | The driver is a client driver to the core PMBus driver. Please see | ||
| 26 | Documentation/hwmon/pmbus for details on PMBus client drivers. | ||
| 27 | |||
| 28 | |||
| 29 | Usage Notes | ||
| 30 | ----------- | ||
| 31 | |||
| 32 | This driver does not auto-detect devices. You will have to instantiate the | ||
| 33 | devices explicitly. Please see Documentation/i2c/instantiating-devices for | ||
| 34 | details. | ||
| 35 | |||
| 36 | |||
| 37 | Platform data support | ||
| 38 | --------------------- | ||
| 39 | |||
| 40 | The driver supports standard PMBus driver platform data. | ||
| 41 | |||
| 42 | |||
| 43 | Sysfs entries | ||
| 44 | ------------- | ||
| 45 | |||
| 46 | The following attributes are supported. Limits are read-write; all other | ||
| 47 | attributes are read-only. | ||
| 48 | |||
| 49 | in[1-6]_label "vout[1-6]". | ||
| 50 | in[1-6]_input Measured voltage. From READ_VOUT register. | ||
| 51 | in[1-6]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register. | ||
| 52 | in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. | ||
| 53 | in[1-6]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register. | ||
| 54 | in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. | ||
| 55 | in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. | ||
| 56 | in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. | ||
| 57 | in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. | ||
| 58 | in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. | ||
| 59 | |||
| 60 | curr[1-6]_label "iout[1-6]". | ||
| 61 | curr[1-6]_input Measured current. From READ_IOUT register. | ||
| 62 | curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register. | ||
| 63 | curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register. | ||
| 64 | curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status. | ||
| 65 | curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. | ||
| 66 | |||
| 67 | in6 and curr6 attributes only exist for MAX34440. | ||
| 68 | |||
| 69 | temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register. | ||
| 70 | temp1 is the chip's internal temperature. temp2..temp5 | ||
| 71 | are remote I2C temperature sensors. For MAX34441, temp6 | ||
| 72 | is a remote thermal-diode sensor. For MAX34440, temp6..8 | ||
| 73 | are remote I2C temperature sensors. | ||
| 74 | temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register. | ||
| 75 | temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register. | ||
| 76 | temp[1-8]_max_alarm Temperature high alarm. | ||
| 77 | temp[1-8]_crit_alarm Temperature critical high alarm. | ||
| 78 | |||
| 79 | temp7 and temp8 attributes only exist for MAX34440. | ||
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688 new file mode 100644 index 000000000000..0ddd3a412030 --- /dev/null +++ b/Documentation/hwmon/max8688 | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | Kernel driver max8688 | ||
| 2 | ===================== | ||
| 3 | |||
| 4 | Supported chips: | ||
| 5 | * Maxim MAX8688 | ||
| 6 | Prefix: 'max8688' | ||
| 7 | Addresses scanned: - | ||
| 8 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf | ||
| 9 | |||
| 10 | Author: Guenter Roeck <guenter.roeck@ericsson.com> | ||
| 11 | |||
| 12 | |||
| 13 | Description | ||
| 14 | ----------- | ||
| 15 | |||
| 16 | This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply | ||
| 17 | Controller/Monitor with PMBus Interface. | ||
| 18 | |||
| 19 | The driver is a client driver to the core PMBus driver. Please see | ||
| 20 | Documentation/hwmon/pmbus for details on PMBus client drivers. | ||
| 21 | |||
| 22 | |||
| 23 | Usage Notes | ||
| 24 | ----------- | ||
| 25 | |||
| 26 | This driver does not auto-detect devices. You will have to instantiate the | ||
| 27 | devices explicitly. Please see Documentation/i2c/instantiating-devices for | ||
| 28 | details. | ||
| 29 | |||
| 30 | |||
| 31 | Platform data support | ||
| 32 | --------------------- | ||
| 33 | |||
| 34 | The driver supports standard PMBus driver platform data. | ||
| 35 | |||
| 36 | |||
| 37 | Sysfs entries | ||
| 38 | ------------- | ||
| 39 | |||
| 40 | The following attributes are supported. Limits are read-write; all other | ||
| 41 | attributes are read-only. | ||
| 42 | |||
| 43 | in1_label "vout1" | ||
| 44 | in1_input Measured voltage. From READ_VOUT register. | ||
| 45 | in1_min Minimum voltage. From VOUT_UV_WARN_LIMIT register. | ||
| 46 | in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register. | ||
| 47 | in1_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register. | ||
| 48 | in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register. | ||
| 49 | in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status. | ||
| 50 | in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status. | ||
| 51 | in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status. | ||
| 52 | in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status. | ||
| 53 | |||
| 54 | curr1_label "iout1" | ||
| 55 | curr1_input Measured current. From READ_IOUT register. | ||
| 56 | curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register. | ||
| 57 | curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register. | ||
| 58 | curr1_max_alarm Current high alarm. From IOUT_OC_WARNING status. | ||
| 59 | curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status. | ||
| 60 | |||
| 61 | temp1_input Measured temperature. From READ_TEMPERATURE_1 register. | ||
| 62 | temp1_max Maximum temperature. From OT_WARN_LIMIT register. | ||
| 63 | temp1_crit Critical high temperature. From OT_FAULT_LIMIT register. | ||
| 64 | temp1_max_alarm Chip temperature high alarm. Set by comparing | ||
| 65 | READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING | ||
| 66 | status is set. | ||
| 67 | temp1_crit_alarm Chip temperature critical high alarm. Set by comparing | ||
| 68 | READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT | ||
| 69 | status is set. | ||
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus index dc4933e96344..5e462fc7f99b 100644 --- a/Documentation/hwmon/pmbus +++ b/Documentation/hwmon/pmbus | |||
| @@ -13,26 +13,6 @@ Supported chips: | |||
| 13 | Prefix: 'ltc2978' | 13 | Prefix: 'ltc2978' |
| 14 | Addresses scanned: - | 14 | Addresses scanned: - |
| 15 | Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf | 15 | Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf |
| 16 | * Maxim MAX16064 | ||
| 17 | Quad Power-Supply Controller | ||
| 18 | Prefix: 'max16064' | ||
| 19 | Addresses scanned: - | ||
| 20 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf | ||
| 21 | * Maxim MAX34440 | ||
| 22 | PMBus 6-Channel Power-Supply Manager | ||
| 23 | Prefixes: 'max34440' | ||
| 24 | Addresses scanned: - | ||
| 25 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf | ||
| 26 | * Maxim MAX34441 | ||
| 27 | PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller | ||
| 28 | Prefixes: 'max34441' | ||
| 29 | Addresses scanned: - | ||
| 30 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf | ||
| 31 | * Maxim MAX8688 | ||
| 32 | Digital Power-Supply Controller/Monitor | ||
| 33 | Prefix: 'max8688' | ||
| 34 | Addresses scanned: - | ||
| 35 | Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf | ||
| 36 | * Generic PMBus devices | 16 | * Generic PMBus devices |
| 37 | Prefix: 'pmbus' | 17 | Prefix: 'pmbus' |
| 38 | Addresses scanned: - | 18 | Addresses scanned: - |
| @@ -175,11 +155,13 @@ currX_crit Critical maximum current. | |||
| 175 | From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register. | 155 | From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register. |
| 176 | currX_alarm Current high alarm. | 156 | currX_alarm Current high alarm. |
| 177 | From IIN_OC_WARNING or IOUT_OC_WARNING status. | 157 | From IIN_OC_WARNING or IOUT_OC_WARNING status. |
| 158 | currX_max_alarm Current high alarm. | ||
| 159 | From IIN_OC_WARNING or IOUT_OC_WARNING status. | ||
| 178 | currX_lcrit_alarm Output current critical low alarm. | 160 | currX_lcrit_alarm Output current critical low alarm. |
| 179 | From IOUT_UC_FAULT status. | 161 | From IOUT_UC_FAULT status. |
| 180 | currX_crit_alarm Current critical high alarm. | 162 | currX_crit_alarm Current critical high alarm. |
| 181 | From IIN_OC_FAULT or IOUT_OC_FAULT status. | 163 | From IIN_OC_FAULT or IOUT_OC_FAULT status. |
| 182 | currX_label "iin" or "vinY" | 164 | currX_label "iin" or "ioutY" |
| 183 | 165 | ||
| 184 | powerX_input Measured power. From READ_PIN or READ_POUT register. | 166 | powerX_input Measured power. From READ_PIN or READ_POUT register. |
| 185 | powerX_cap Output power cap. From POUT_MAX register. | 167 | powerX_cap Output power cap. From POUT_MAX register. |
| @@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm. | |||
| 193 | From POUT_OP_FAULT status. | 175 | From POUT_OP_FAULT status. |
| 194 | powerX_label "pin" or "poutY" | 176 | powerX_label "pin" or "poutY" |
| 195 | 177 | ||
| 196 | tempX_input Measured tempererature. | 178 | tempX_input Measured temperature. |
| 197 | From READ_TEMPERATURE_X register. | 179 | From READ_TEMPERATURE_X register. |
| 198 | tempX_min Mimimum tempererature. From UT_WARN_LIMIT register. | 180 | tempX_min Minimum temperature. From UT_WARN_LIMIT register.
| 199 | tempX_max Maximum tempererature. From OT_WARN_LIMIT register. | 181 | tempX_max Maximum temperature. From OT_WARN_LIMIT register. |
| 200 | tempX_lcrit Critical low tempererature. | 182 | tempX_lcrit Critical low temperature. |
| 201 | From UT_FAULT_LIMIT register. | 183 | From UT_FAULT_LIMIT register. |
| 202 | tempX_crit Critical high tempererature. | 184 | tempX_crit Critical high temperature. |
| 203 | From OT_FAULT_LIMIT register. | 185 | From OT_FAULT_LIMIT register. |
| 204 | tempX_min_alarm Chip temperature low alarm. Set by comparing | 186 | tempX_min_alarm Chip temperature low alarm. Set by comparing |
| 205 | READ_TEMPERATURE_X with UT_WARN_LIMIT if | 187 | READ_TEMPERATURE_X with UT_WARN_LIMIT if |
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665 index 3820fc9ca52d..59e316140542 100644 --- a/Documentation/hwmon/smm665 +++ b/Documentation/hwmon/smm665 | |||
| @@ -150,8 +150,8 @@ in8_crit_alarm Channel F critical alarm | |||
| 150 | in9_crit_alarm AIN1 critical alarm | 150 | in9_crit_alarm AIN1 critical alarm |
| 151 | in10_crit_alarm AIN2 critical alarm | 151 | in10_crit_alarm AIN2 critical alarm |
| 152 | 152 | ||
| 153 | temp1_input Chip tempererature | 153 | temp1_input Chip temperature |
| 154 | temp1_min Mimimum chip tempererature | 154 | temp1_min Minimum chip temperature |
| 155 | temp1_max Maximum chip tempererature | 155 | temp1_max Maximum chip temperature |
| 156 | temp1_crit Critical chip tempererature | 156 | temp1_crit Critical chip temperature |
| 157 | temp1_crit_alarm Temperature critical alarm | 157 | temp1_crit_alarm Temperature critical alarm |
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches new file mode 100644 index 000000000000..86f42e8e9e49 --- /dev/null +++ b/Documentation/hwmon/submitting-patches | |||
| @@ -0,0 +1,109 @@ | |||
| 1 | How to Get Your Patch Accepted Into the Hwmon Subsystem | ||
| 2 | ------------------------------------------------------- | ||
| 3 | |||
| 4 | This text is a collection of suggestions for people writing patches or | ||
| 5 | drivers for the hwmon subsystem. Following these suggestions will greatly | ||
| 6 | increase the chances of your change being accepted. | ||
| 7 | |||
| 8 | |||
| 9 | 1. General | ||
| 10 | ---------- | ||
| 11 | |||
| 12 | * It should be unnecessary to mention, but please read and follow | ||
| 13 | Documentation/SubmitChecklist | ||
| 14 | Documentation/SubmittingDrivers | ||
| 15 | Documentation/SubmittingPatches | ||
| 16 | Documentation/CodingStyle | ||
| 17 | |||
| 18 | * If your patch generates checkpatch warnings, please refrain from explanations | ||
| 19 | such as "I don't like that coding style". Keep in mind that each unnecessary | ||
| 20 | warning helps hide a real problem. If you don't like the kernel coding | ||
| 21 | style, don't write kernel drivers. | ||
| 22 | |||
| 23 | * Please test your patch thoroughly. We are not your test group. | ||
| 24 | Sometimes a patch cannot be tested, or cannot be tested completely, because of missing | ||
| 25 | hardware. In such cases, you should test-build the code on at least one | ||
| 26 | architecture. If run-time testing was not achieved, it should be written | ||
| 27 | explicitly below the patch header. | ||
| 28 | |||
| 29 | * If your patch (or the driver) is affected by configuration options such as | ||
| 30 | CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration | ||
| 31 | variants. | ||
| 32 | |||
| 33 | |||
| 34 | 2. Adding functionality to existing drivers | ||
| 35 | ------------------------------------------- | ||
| 36 | |||
| 37 | * Make sure the documentation in Documentation/hwmon/<driver_name> is up to | ||
| 38 | date. | ||
| 39 | |||
| 40 | * Make sure the information in Kconfig is up to date. | ||
| 41 | |||
| 42 | * If the added functionality requires some cleanup or structural changes, split | ||
| 43 | your patch into a cleanup part and the actual addition. This makes it easier | ||
| 44 | to review your changes, and to bisect any resulting problems. | ||
| 45 | |||
| 46 | * Never mix bug fixes, cleanup, and functional enhancements in a single patch. | ||
| 47 | |||
| 48 | |||
| 49 | 3. New drivers | ||
| 50 | -------------- | ||
| 51 | |||
| 52 | * Running your patch or driver file(s) through checkpatch does not mean its | ||
| 53 | formatting is clean. If unsure about formatting in your new driver, run it | ||
| 54 | through Lindent. Lindent is not perfect, and you may have to do some minor | ||
| 55 | cleanup, but it is a good start. | ||
| 56 | |||
| 57 | * Consider adding yourself to MAINTAINERS. | ||
| 58 | |||
| 59 | * Document the driver in Documentation/hwmon/<driver_name>. | ||
| 60 | |||
| 61 | * Add the driver to Kconfig and Makefile in alphabetical order. | ||
| 62 | |||
| 63 | * Make sure that all dependencies are listed in Kconfig. For new drivers, it | ||
| 64 | is most likely prudent to add a dependency on EXPERIMENTAL. | ||
| 65 | |||
| 66 | * Avoid forward declarations if you can. Rearrange the code if necessary. | ||
| 67 | |||
| 68 | * Avoid calculations in macros and macro-generated functions. While such macros | ||
| 69 | may save a line or so in the source, it obfuscates the code and makes code | ||
| 70 | review more difficult. It may also result in code which is more complicated | ||
| 71 | than necessary. Use inline functions or just regular functions instead. | ||
| 72 | |||
| 73 | * If the driver has a detect function, make sure it is silent. Debug messages | ||
| 74 | and messages printed after a successful detection are acceptable, but it | ||
| 75 | must not print messages such as "Chip XXX not found/supported". | ||
| 76 | |||
| 77 | Keep in mind that the detect function will run for all drivers supporting an | ||
| 78 | address if a chip is detected on that address. Unnecessary messages will just | ||
| 79 | pollute the kernel log and not provide any value. | ||
| 80 | |||
| 81 | * Provide a detect function if and only if a chip can be detected reliably. | ||
| 82 | |||
| 83 | * Avoid writing to chip registers in the detect function. If you have to write, | ||
| 84 | only do it after you have already gathered enough data to be certain that the | ||
| 85 | detection is going to be successful. | ||
| 86 | |||
| 87 | Keep in mind that the chip might not be what your driver believes it is, and | ||
| 88 | writing to it might cause a bad misconfiguration. | ||
| 89 | |||
| 90 | * Make sure there are no race conditions in the probe function. Specifically, | ||
| 91 | completely initialize your chip first, then create sysfs entries and register | ||
| 92 | with the hwmon subsystem. | ||
| 93 | |||
| 94 | * Do not provide support for deprecated sysfs attributes. | ||
| 95 | |||
| 96 | * Do not create non-standard attributes unless really needed. If you have to use | ||
| 97 | non-standard attributes, or you believe you do, discuss it on the mailing list | ||
| 98 | first. Either case, provide a detailed explanation why you need the | ||
| 99 | non-standard attribute(s). | ||
| 100 | Standard attributes are specified in Documentation/hwmon/sysfs-interface. | ||
| 101 | |||
| 102 | * When deciding which sysfs attributes to support, look at the chip's | ||
| 103 | capabilities. While we do not expect your driver to support everything the | ||
| 104 | chip may offer, it should at least support all limits and alarms. | ||
| 105 | |||
| 106 | * Last but not least, please check if a driver for your chip already exists | ||
| 107 | before starting to write a new driver. Especially for temperature sensors, | ||
| 108 | new chips are often variants of previously released chips. In some cases, | ||
| 109 | a presumably new chip may simply have been relabeled. | ||
diff --git a/Documentation/md.txt b/Documentation/md.txt index a81c7b4790f2..2366b1c8cf19 100644 --- a/Documentation/md.txt +++ b/Documentation/md.txt | |||
| @@ -552,6 +552,16 @@ also have | |||
| 552 | within the array where IO will be blocked. This is currently | 552 | within the array where IO will be blocked. This is currently |
| 553 | only supported for raid4/5/6. | 553 | only supported for raid4/5/6. |
| 554 | 554 | ||
| 555 | sync_min | ||
| 556 | sync_max | ||
| 557 | The two values, given as numbers of sectors, indicate a range | ||
| 558 | within the array where 'check'/'repair' will operate. Must be | ||
| 559 | a multiple of chunk_size. When it reaches "sync_max" it will | ||
| 560 | pause, rather than complete. | ||
| 561 | You can use 'select' or 'poll' on "sync_completed" to wait for | ||
| 562 | that number to reach sync_max. Then you can either increase | ||
| 563 | "sync_max", or can write 'idle' to "sync_action". | ||
| 564 | |||
| 555 | 565 | ||
| 556 | Each active md device may also have attributes specific to the | 566 | Each active md device may also have attributes specific to the |
| 557 | personality module that manages it. | 567 | personality module that manages it. |
diff --git a/MAINTAINERS b/MAINTAINERS index ec3600306289..1e2724e55cf0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -151,6 +151,7 @@ S: Maintained | |||
| 151 | F: drivers/net/hamradio/6pack.c | 151 | F: drivers/net/hamradio/6pack.c |
| 152 | 152 | ||
| 153 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER | 153 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER |
| 154 | M: Realtek linux nic maintainers <nic_swsd@realtek.com> | ||
| 154 | M: Francois Romieu <romieu@fr.zoreil.com> | 155 | M: Francois Romieu <romieu@fr.zoreil.com> |
| 155 | L: netdev@vger.kernel.org | 156 | L: netdev@vger.kernel.org |
| 156 | S: Maintained | 157 | S: Maintained |
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index ed5bc9e05a4e..cd4458f64171 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define __ASM_ARM_CPUTYPE_H | 2 | #define __ASM_ARM_CPUTYPE_H |
| 3 | 3 | ||
| 4 | #include <linux/stringify.h> | 4 | #include <linux/stringify.h> |
| 5 | #include <linux/kernel.h> | ||
| 5 | 6 | ||
| 6 | #define CPUID_ID 0 | 7 | #define CPUID_ID 0 |
| 7 | #define CPUID_CACHETYPE 1 | 8 | #define CPUID_CACHETYPE 1 |
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index c891eb76c0e3..87dbe3e21970 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
| @@ -396,6 +396,10 @@ | |||
| 396 | #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) | 396 | #define __NR_fanotify_init (__NR_SYSCALL_BASE+367) |
| 397 | #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) | 397 | #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) |
| 398 | #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) | 398 | #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) |
| 399 | #define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370) | ||
| 400 | #define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371) | ||
| 401 | #define __NR_clock_adjtime (__NR_SYSCALL_BASE+372) | ||
| 402 | #define __NR_syncfs (__NR_SYSCALL_BASE+373) | ||
| 399 | 403 | ||
| 400 | /* | 404 | /* |
| 401 | * The following SWIs are ARM private. | 405 | * The following SWIs are ARM private. |
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 5c26eccef998..7fbf28c35bb2 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S | |||
| @@ -379,6 +379,10 @@ | |||
| 379 | CALL(sys_fanotify_init) | 379 | CALL(sys_fanotify_init) |
| 380 | CALL(sys_fanotify_mark) | 380 | CALL(sys_fanotify_mark) |
| 381 | CALL(sys_prlimit64) | 381 | CALL(sys_prlimit64) |
| 382 | /* 370 */ CALL(sys_name_to_handle_at) | ||
| 383 | CALL(sys_open_by_handle_at) | ||
| 384 | CALL(sys_clock_adjtime) | ||
| 385 | CALL(sys_syncfs) | ||
| 382 | #ifndef syscalls_counted | 386 | #ifndef syscalls_counted |
| 383 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | 387 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls |
| 384 | #define syscalls_counted | 388 | #define syscalls_counted |
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 3d4dab43c994..a50fc9f493b3 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h | |||
| @@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu) { } | |||
| 51 | #endif /* CONFIG_NUMA */ | 51 | #endif /* CONFIG_NUMA */ |
| 52 | 52 | ||
| 53 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | 53 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS |
| 54 | struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); | 54 | void debug_cpumask_set_cpu(int cpu, int node, bool enable); |
| 55 | #endif | 55 | #endif |
| 56 | 56 | ||
| 57 | #endif /* _ASM_X86_NUMA_H */ | 57 | #endif /* _ASM_X86_NUMA_H */ |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8ed8908cc9f7..c2871d3c71b6 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -312,26 +312,6 @@ void __cpuinit smp_store_cpu_info(int id) | |||
| 312 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) | ||
| 316 | { | ||
| 317 | int node1 = early_cpu_to_node(cpu1); | ||
| 318 | int node2 = early_cpu_to_node(cpu2); | ||
| 319 | |||
| 320 | /* | ||
| 321 | * Our CPU scheduler assumes all logical cpus in the same physical cpu | ||
| 322 | * share the same node. But, buggy ACPI or NUMA emulation might assign | ||
| 323 | * them to different node. Fix it. | ||
| 324 | */ | ||
| 325 | if (node1 != node2) { | ||
| 326 | pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n", | ||
| 327 | cpu1, node1, cpu2, node2, node2); | ||
| 328 | |||
| 329 | numa_remove_cpu(cpu1); | ||
| 330 | numa_set_node(cpu1, node2); | ||
| 331 | numa_add_cpu(cpu1); | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 335 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
| 336 | { | 316 | { |
| 337 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
| @@ -340,7 +320,6 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | |||
| 340 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
| 341 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); | 321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
| 342 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); | 322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
| 343 | check_cpu_siblings_on_same_node(cpu1, cpu2); | ||
| 344 | } | 323 | } |
| 345 | 324 | ||
| 346 | 325 | ||
| @@ -382,12 +361,10 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 382 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
| 383 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); | 362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
| 384 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); | 363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
| 385 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 386 | } | 364 | } |
| 387 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
| 388 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
| 389 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 367 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
| 390 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 391 | /* | 368 | /* |
| 392 | * Does this new cpu bringup a new core? | 369 | * Does this new cpu bringup a new core? |
| 393 | */ | 370 | */ |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 9559d360fde7..745258dfc4dc 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
| @@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu) | |||
| 213 | return per_cpu(x86_cpu_to_node_map, cpu); | 213 | return per_cpu(x86_cpu_to_node_map, cpu); |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) | 216 | void debug_cpumask_set_cpu(int cpu, int node, bool enable) |
| 217 | { | 217 | { |
| 218 | int node = early_cpu_to_node(cpu); | ||
| 219 | struct cpumask *mask; | 218 | struct cpumask *mask; |
| 220 | char buf[64]; | 219 | char buf[64]; |
| 221 | 220 | ||
| 222 | if (node == NUMA_NO_NODE) { | 221 | if (node == NUMA_NO_NODE) { |
| 223 | /* early_cpu_to_node() already emits a warning and trace */ | 222 | /* early_cpu_to_node() already emits a warning and trace */ |
| 224 | return NULL; | 223 | return; |
| 225 | } | 224 | } |
| 226 | mask = node_to_cpumask_map[node]; | 225 | mask = node_to_cpumask_map[node]; |
| 227 | if (!mask) { | 226 | if (!mask) { |
| 228 | pr_err("node_to_cpumask_map[%i] NULL\n", node); | 227 | pr_err("node_to_cpumask_map[%i] NULL\n", node); |
| 229 | dump_stack(); | 228 | dump_stack(); |
| 230 | return NULL; | 229 | return; |
| 231 | } | 230 | } |
| 232 | 231 | ||
| 232 | if (enable) | ||
| 233 | cpumask_set_cpu(cpu, mask); | ||
| 234 | else | ||
| 235 | cpumask_clear_cpu(cpu, mask); | ||
| 236 | |||
| 233 | cpulist_scnprintf(buf, sizeof(buf), mask); | 237 | cpulist_scnprintf(buf, sizeof(buf), mask); |
| 234 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | 238 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", |
| 235 | enable ? "numa_add_cpu" : "numa_remove_cpu", | 239 | enable ? "numa_add_cpu" : "numa_remove_cpu", |
| 236 | cpu, node, buf); | 240 | cpu, node, buf); |
| 237 | return mask; | 241 | return; |
| 238 | } | 242 | } |
| 239 | 243 | ||
| 240 | # ifndef CONFIG_NUMA_EMU | 244 | # ifndef CONFIG_NUMA_EMU |
| 241 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | 245 | static void __cpuinit numa_set_cpumask(int cpu, bool enable) |
| 242 | { | 246 | { |
| 243 | struct cpumask *mask; | 247 | debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); |
| 244 | |||
| 245 | mask = debug_cpumask_set_cpu(cpu, enable); | ||
| 246 | if (!mask) | ||
| 247 | return; | ||
| 248 | |||
| 249 | if (enable) | ||
| 250 | cpumask_set_cpu(cpu, mask); | ||
| 251 | else | ||
| 252 | cpumask_clear_cpu(cpu, mask); | ||
| 253 | } | 248 | } |
| 254 | 249 | ||
| 255 | void __cpuinit numa_add_cpu(int cpu) | 250 | void __cpuinit numa_add_cpu(int cpu) |
| 256 | { | 251 | { |
| 257 | numa_set_cpumask(cpu, 1); | 252 | numa_set_cpumask(cpu, true); |
| 258 | } | 253 | } |
| 259 | 254 | ||
| 260 | void __cpuinit numa_remove_cpu(int cpu) | 255 | void __cpuinit numa_remove_cpu(int cpu) |
| 261 | { | 256 | { |
| 262 | numa_set_cpumask(cpu, 0); | 257 | numa_set_cpumask(cpu, false); |
| 263 | } | 258 | } |
| 264 | # endif /* !CONFIG_NUMA_EMU */ | 259 | # endif /* !CONFIG_NUMA_EMU */ |
| 265 | 260 | ||
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index ad091e4cff17..de84cc140379 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c | |||
| @@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu) | |||
| 454 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); | 454 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); |
| 455 | } | 455 | } |
| 456 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 456 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
| 457 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | 457 | static void __cpuinit numa_set_cpumask(int cpu, bool enable) |
| 458 | { | 458 | { |
| 459 | struct cpumask *mask; | 459 | int nid, physnid; |
| 460 | int nid, physnid, i; | ||
| 461 | 460 | ||
| 462 | nid = early_cpu_to_node(cpu); | 461 | nid = early_cpu_to_node(cpu); |
| 463 | if (nid == NUMA_NO_NODE) { | 462 | if (nid == NUMA_NO_NODE) { |
| @@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) | |||
| 467 | 466 | ||
| 468 | physnid = emu_nid_to_phys[nid]; | 467 | physnid = emu_nid_to_phys[nid]; |
| 469 | 468 | ||
| 470 | for_each_online_node(i) { | 469 | for_each_online_node(nid) { |
| 471 | if (emu_nid_to_phys[nid] != physnid) | 470 | if (emu_nid_to_phys[nid] != physnid) |
| 472 | continue; | 471 | continue; |
| 473 | 472 | ||
| 474 | mask = debug_cpumask_set_cpu(cpu, enable); | 473 | debug_cpumask_set_cpu(cpu, nid, enable); |
| 475 | if (!mask) | ||
| 476 | return; | ||
| 477 | |||
| 478 | if (enable) | ||
| 479 | cpumask_set_cpu(cpu, mask); | ||
| 480 | else | ||
| 481 | cpumask_clear_cpu(cpu, mask); | ||
| 482 | } | 474 | } |
| 483 | } | 475 | } |
| 484 | 476 | ||
| 485 | void __cpuinit numa_add_cpu(int cpu) | 477 | void __cpuinit numa_add_cpu(int cpu) |
| 486 | { | 478 | { |
| 487 | numa_set_cpumask(cpu, 1); | 479 | numa_set_cpumask(cpu, true); |
| 488 | } | 480 | } |
| 489 | 481 | ||
| 490 | void __cpuinit numa_remove_cpu(int cpu) | 482 | void __cpuinit numa_remove_cpu(int cpu) |
| 491 | { | 483 | { |
| 492 | numa_set_cpumask(cpu, 0); | 484 | numa_set_cpumask(cpu, false); |
| 493 | } | 485 | } |
| 494 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 486 | #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index a991b57f91fe..aef7af92b28b 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 1473 | #endif | 1473 | #endif |
| 1474 | } | 1474 | } |
| 1475 | 1475 | ||
| 1476 | #ifdef CONFIG_X86_32 | ||
| 1476 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | 1477 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) |
| 1477 | { | 1478 | { |
| 1478 | unsigned long pfn = pte_pfn(pte); | ||
| 1479 | |||
| 1480 | #ifdef CONFIG_X86_32 | ||
| 1481 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | 1479 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ |
| 1482 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | 1480 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) |
| 1483 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | 1481 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
| 1484 | pte_val_ma(pte)); | 1482 | pte_val_ma(pte)); |
| 1485 | #endif | 1483 | |
| 1484 | return pte; | ||
| 1485 | } | ||
| 1486 | #else /* CONFIG_X86_64 */ | ||
| 1487 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | ||
| 1488 | { | ||
| 1489 | unsigned long pfn = pte_pfn(pte); | ||
| 1486 | 1490 | ||
| 1487 | /* | 1491 | /* |
| 1488 | * If the new pfn is within the range of the newly allocated | 1492 | * If the new pfn is within the range of the newly allocated |
| @@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) | |||
| 1497 | 1501 | ||
| 1498 | return pte; | 1502 | return pte; |
| 1499 | } | 1503 | } |
| 1504 | #endif /* CONFIG_X86_64 */ | ||
| 1500 | 1505 | ||
| 1501 | /* Init-time set_pte while constructing initial pagetables, which | 1506 | /* Init-time set_pte while constructing initial pagetables, which |
| 1502 | doesn't allow RO pagetable pages to be remapped RW */ | 1507 | doesn't allow RO pagetable pages to be remapped RW */ |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index fa0269a99377..90bac0aac3a5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
| @@ -227,7 +227,7 @@ char * __init xen_memory_setup(void) | |||
| 227 | 227 | ||
| 228 | memcpy(map_raw, map, sizeof(map)); | 228 | memcpy(map_raw, map, sizeof(map)); |
| 229 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
| 230 | xen_extra_mem_start = mem_end; | 230 | xen_extra_mem_start = max((1ULL << 32), mem_end); |
| 231 | for (i = 0; i < memmap.nr_entries; i++) { | 231 | for (i = 0; i < memmap.nr_entries; i++) { |
| 232 | unsigned long long end; | 232 | unsigned long long end; |
| 233 | 233 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index 5fa3dd2705c6..a2e58eeb3549 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
| 292 | /** | 292 | /** |
| 293 | * __blk_run_queue - run a single device queue | 293 | * __blk_run_queue - run a single device queue |
| 294 | * @q: The queue to run | 294 | * @q: The queue to run |
| 295 | * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. | ||
| 296 | * | 295 | * |
| 297 | * Description: | 296 | * Description: |
| 298 | * See @blk_run_queue. This variant must be called with the queue lock | 297 | * See @blk_run_queue. This variant must be called with the queue lock |
| @@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q) | |||
| 303 | if (unlikely(blk_queue_stopped(q))) | 302 | if (unlikely(blk_queue_stopped(q))) |
| 304 | return; | 303 | return; |
| 305 | 304 | ||
| 306 | /* | 305 | q->request_fn(q); |
| 307 | * Only recurse once to avoid overrunning the stack, let the unplug | ||
| 308 | * handling reinvoke the handler shortly if we already got there. | ||
| 309 | */ | ||
| 310 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | ||
| 311 | q->request_fn(q); | ||
| 312 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | ||
| 313 | } else | ||
| 314 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); | ||
| 315 | } | 306 | } |
| 316 | EXPORT_SYMBOL(__blk_run_queue); | 307 | EXPORT_SYMBOL(__blk_run_queue); |
| 317 | 308 | ||
| @@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q) | |||
| 328 | if (likely(!blk_queue_stopped(q))) | 319 | if (likely(!blk_queue_stopped(q))) |
| 329 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); | 320 | queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); |
| 330 | } | 321 | } |
| 322 | EXPORT_SYMBOL(blk_run_queue_async); | ||
| 331 | 323 | ||
| 332 | /** | 324 | /** |
| 333 | * blk_run_queue - run a single device queue | 325 | * blk_run_queue - run a single device queue |
| @@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
| 2787 | 2779 | ||
| 2788 | local_irq_restore(flags); | 2780 | local_irq_restore(flags); |
| 2789 | } | 2781 | } |
| 2790 | EXPORT_SYMBOL(blk_flush_plug_list); | ||
| 2791 | 2782 | ||
| 2792 | void blk_finish_plug(struct blk_plug *plug) | 2783 | void blk_finish_plug(struct blk_plug *plug) |
| 2793 | { | 2784 | { |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6d735122bc59..bd236313f35d 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
| 66 | 66 | ||
| 67 | if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { | 67 | if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { |
| 68 | blk_set_queue_full(q, BLK_RW_SYNC); | 68 | blk_set_queue_full(q, BLK_RW_SYNC); |
| 69 | } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) { | 69 | } else { |
| 70 | blk_clear_queue_full(q, BLK_RW_SYNC); | 70 | blk_clear_queue_full(q, BLK_RW_SYNC); |
| 71 | wake_up(&rl->wait[BLK_RW_SYNC]); | 71 | wake_up(&rl->wait[BLK_RW_SYNC]); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { | 74 | if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { |
| 75 | blk_set_queue_full(q, BLK_RW_ASYNC); | 75 | blk_set_queue_full(q, BLK_RW_ASYNC); |
| 76 | } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) { | 76 | } else { |
| 77 | blk_clear_queue_full(q, BLK_RW_ASYNC); | 77 | blk_clear_queue_full(q, BLK_RW_ASYNC); |
| 78 | wake_up(&rl->wait[BLK_RW_ASYNC]); | 78 | wake_up(&rl->wait[BLK_RW_ASYNC]); |
| 79 | } | 79 | } |
| @@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk) | |||
| 508 | return ret; | 508 | return ret; |
| 509 | 509 | ||
| 510 | ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); | 510 | ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); |
| 511 | if (ret < 0) | 511 | if (ret < 0) { |
| 512 | blk_trace_remove_sysfs(dev); | ||
| 512 | return ret; | 513 | return ret; |
| 514 | } | ||
| 513 | 515 | ||
| 514 | kobject_uevent(&q->kobj, KOBJ_ADD); | 516 | kobject_uevent(&q->kobj, KOBJ_ADD); |
| 515 | 517 | ||
diff --git a/block/blk.h b/block/blk.h index c9df8fc3c999..61263463e38e 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data); | |||
| 22 | void blk_delete_timer(struct request *); | 22 | void blk_delete_timer(struct request *); |
| 23 | void blk_add_timer(struct request *); | 23 | void blk_add_timer(struct request *); |
| 24 | void __generic_unplug_device(struct request_queue *); | 24 | void __generic_unplug_device(struct request_queue *); |
| 25 | void blk_run_queue_async(struct request_queue *q); | ||
| 26 | 25 | ||
| 27 | /* | 26 | /* |
| 28 | * Internal atomic flags for request handling | 27 | * Internal atomic flags for request handling |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 46b0a1d1d925..5b52011e3a40 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
| 2582 | } | 2582 | } |
| 2583 | 2583 | ||
| 2584 | /* | 2584 | /* |
| 2585 | * Must always be called with the rcu_read_lock() held | 2585 | * Call func for each cic attached to this ioc. |
| 2586 | */ | 2586 | */ |
| 2587 | static void | 2587 | static void |
| 2588 | __call_for_each_cic(struct io_context *ioc, | 2588 | call_for_each_cic(struct io_context *ioc, |
| 2589 | void (*func)(struct io_context *, struct cfq_io_context *)) | 2589 | void (*func)(struct io_context *, struct cfq_io_context *)) |
| 2590 | { | 2590 | { |
| 2591 | struct cfq_io_context *cic; | 2591 | struct cfq_io_context *cic; |
| 2592 | struct hlist_node *n; | 2592 | struct hlist_node *n; |
| 2593 | 2593 | ||
| 2594 | rcu_read_lock(); | ||
| 2595 | |||
| 2594 | hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) | 2596 | hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) |
| 2595 | func(ioc, cic); | 2597 | func(ioc, cic); |
| 2596 | } | ||
| 2597 | 2598 | ||
| 2598 | /* | ||
| 2599 | * Call func for each cic attached to this ioc. | ||
| 2600 | */ | ||
| 2601 | static void | ||
| 2602 | call_for_each_cic(struct io_context *ioc, | ||
| 2603 | void (*func)(struct io_context *, struct cfq_io_context *)) | ||
| 2604 | { | ||
| 2605 | rcu_read_lock(); | ||
| 2606 | __call_for_each_cic(ioc, func); | ||
| 2607 | rcu_read_unlock(); | 2599 | rcu_read_unlock(); |
| 2608 | } | 2600 | } |
| 2609 | 2601 | ||
| @@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc) | |||
| 2664 | * should be ok to iterate over the known list, we will see all cic's | 2656 | * should be ok to iterate over the known list, we will see all cic's |
| 2665 | * since no new ones are added. | 2657 | * since no new ones are added. |
| 2666 | */ | 2658 | */ |
| 2667 | __call_for_each_cic(ioc, cic_free_func); | 2659 | call_for_each_cic(ioc, cic_free_func); |
| 2668 | } | 2660 | } |
| 2669 | 2661 | ||
| 2670 | static void cfq_put_cooperator(struct cfq_queue *cfqq) | 2662 | static void cfq_put_cooperator(struct cfq_queue *cfqq) |
diff --git a/block/elevator.c b/block/elevator.c index 6f6abc08bb56..45ca1e34f582 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where) | |||
| 671 | q->boundary_rq = rq; | 671 | q->boundary_rq = rq; |
| 672 | } | 672 | } |
| 673 | } else if (!(rq->cmd_flags & REQ_ELVPRIV) && | 673 | } else if (!(rq->cmd_flags & REQ_ELVPRIV) && |
| 674 | where == ELEVATOR_INSERT_SORT) | 674 | (where == ELEVATOR_INSERT_SORT || |
| 675 | where == ELEVATOR_INSERT_SORT_MERGE)) | ||
| 675 | where = ELEVATOR_INSERT_BACK; | 676 | where = ELEVATOR_INSERT_BACK; |
| 676 | 677 | ||
| 677 | switch (where) { | 678 | switch (where) { |
diff --git a/block/genhd.c b/block/genhd.c index b364bd038a18..2dd988723d73 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work) | |||
| 1588 | 1588 | ||
| 1589 | spin_unlock_irq(&ev->lock); | 1589 | spin_unlock_irq(&ev->lock); |
| 1590 | 1590 | ||
| 1591 | /* tell userland about new events */ | 1591 | /* |
| 1592 | * Tell userland about new events. Only the events listed in | ||
| 1593 | * @disk->events are reported. Unlisted events are processed the | ||
| 1594 | * same internally but never get reported to userland. | ||
| 1595 | */ | ||
| 1592 | for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) | 1596 | for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) |
| 1593 | if (events & (1 << i)) | 1597 | if (events & disk->events & (1 << i)) |
| 1594 | envp[nr_events++] = disk_uevents[i]; | 1598 | envp[nr_events++] = disk_uevents[i]; |
| 1595 | 1599 | ||
| 1596 | if (nr_events) | 1600 | if (nr_events) |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 012cba0d6d96..b072648dc3f6 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c | |||
| @@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) | |||
| 115 | struct agp_memory *new; | 115 | struct agp_memory *new; |
| 116 | unsigned long alloc_size = num_agp_pages*sizeof(struct page *); | 116 | unsigned long alloc_size = num_agp_pages*sizeof(struct page *); |
| 117 | 117 | ||
| 118 | if (INT_MAX/sizeof(struct page *) < num_agp_pages) | ||
| 119 | return NULL; | ||
| 120 | |||
| 118 | new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); | 121 | new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); |
| 119 | if (new == NULL) | 122 | if (new == NULL) |
| 120 | return NULL; | 123 | return NULL; |
| @@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, | |||
| 234 | int scratch_pages; | 237 | int scratch_pages; |
| 235 | struct agp_memory *new; | 238 | struct agp_memory *new; |
| 236 | size_t i; | 239 | size_t i; |
| 240 | int cur_memory; | ||
| 237 | 241 | ||
| 238 | if (!bridge) | 242 | if (!bridge) |
| 239 | return NULL; | 243 | return NULL; |
| 240 | 244 | ||
| 241 | if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) | 245 | cur_memory = atomic_read(&bridge->current_memory_agp); |
| 246 | if ((cur_memory + page_count > bridge->max_memory_agp) || | ||
| 247 | (cur_memory + page_count < page_count)) | ||
| 242 | return NULL; | 248 | return NULL; |
| 243 | 249 | ||
| 244 | if (type >= AGP_USER_TYPES) { | 250 | if (type >= AGP_USER_TYPES) { |
| @@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) | |||
| 1089 | return -EINVAL; | 1095 | return -EINVAL; |
| 1090 | } | 1096 | } |
| 1091 | 1097 | ||
| 1092 | /* AK: could wrap */ | 1098 | if (((pg_start + mem->page_count) > num_entries) || |
| 1093 | if ((pg_start + mem->page_count) > num_entries) | 1099 | ((pg_start + mem->page_count) < pg_start)) |
| 1094 | return -EINVAL; | 1100 | return -EINVAL; |
| 1095 | 1101 | ||
| 1096 | j = pg_start; | 1102 | j = pg_start; |
| @@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
| 1124 | { | 1130 | { |
| 1125 | size_t i; | 1131 | size_t i; |
| 1126 | struct agp_bridge_data *bridge; | 1132 | struct agp_bridge_data *bridge; |
| 1127 | int mask_type; | 1133 | int mask_type, num_entries; |
| 1128 | 1134 | ||
| 1129 | bridge = mem->bridge; | 1135 | bridge = mem->bridge; |
| 1130 | if (!bridge) | 1136 | if (!bridge) |
| @@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
| 1136 | if (type != mem->type) | 1142 | if (type != mem->type) |
| 1137 | return -EINVAL; | 1143 | return -EINVAL; |
| 1138 | 1144 | ||
| 1145 | num_entries = agp_num_entries(); | ||
| 1146 | if (((pg_start + mem->page_count) > num_entries) || | ||
| 1147 | ((pg_start + mem->page_count) < pg_start)) | ||
| 1148 | return -EINVAL; | ||
| 1149 | |||
| 1139 | mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); | 1150 | mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); |
| 1140 | if (mask_type != 0) { | 1151 | if (mask_type != 0) { |
| 1141 | /* The generic routines know nothing of memory types */ | 1152 | /* The generic routines know nothing of memory types */ |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 84b164d1eb2b..838568a7dbf5 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port) | |||
| 1280 | spin_lock_irq(&pdrvdata_lock); | 1280 | spin_lock_irq(&pdrvdata_lock); |
| 1281 | list_del(&port->cons.list); | 1281 | list_del(&port->cons.list); |
| 1282 | spin_unlock_irq(&pdrvdata_lock); | 1282 | spin_unlock_irq(&pdrvdata_lock); |
| 1283 | #if 0 | ||
| 1284 | /* | ||
| 1285 | * hvc_remove() not called as removing one hvc port | ||
| 1286 | * results in other hvc ports getting frozen. | ||
| 1287 | * | ||
| 1288 | * Once this is resolved in hvc, this functionality | ||
| 1289 | * will be enabled. Till that is done, the -EPIPE | ||
| 1290 | * return from get_chars() above will help | ||
| 1291 | * hvc_console.c to clean up on ports we remove here. | ||
| 1292 | */ | ||
| 1293 | hvc_remove(port->cons.hvc); | 1283 | hvc_remove(port->cons.hvc); |
| 1294 | #endif | ||
| 1295 | } | 1284 | } |
| 1296 | 1285 | ||
| 1297 | /* Remove unused data this port might have received. */ | 1286 | /* Remove unused data this port might have received. */ |
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index d77005849af8..219d88a0eeae 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
| @@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb) | |||
| 142 | cbq->callback(msg, nsp); | 142 | cbq->callback(msg, nsp); |
| 143 | kfree_skb(skb); | 143 | kfree_skb(skb); |
| 144 | cn_queue_release_callback(cbq); | 144 | cn_queue_release_callback(cbq); |
| 145 | err = 0; | ||
| 145 | } | 146 | } |
| 146 | 147 | ||
| 147 | return err; | 148 | return err; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 432fc04c6bff..e522c702b04e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev, | |||
| 3771 | int entries, tlb_miss; | 3771 | int entries, tlb_miss; |
| 3772 | 3772 | ||
| 3773 | crtc = intel_get_crtc_for_plane(dev, plane); | 3773 | crtc = intel_get_crtc_for_plane(dev, plane); |
| 3774 | if (crtc->fb == NULL || !crtc->enabled) | 3774 | if (crtc->fb == NULL || !crtc->enabled) { |
| 3775 | *cursor_wm = cursor->guard_size; | ||
| 3776 | *plane_wm = display->guard_size; | ||
| 3775 | return false; | 3777 | return false; |
| 3778 | } | ||
| 3776 | 3779 | ||
| 3777 | htotal = crtc->mode.htotal; | 3780 | htotal = crtc->mode.htotal; |
| 3778 | hdisplay = crtc->mode.hdisplay; | 3781 | hdisplay = crtc->mode.hdisplay; |
| @@ -6215,36 +6218,6 @@ cleanup_work: | |||
| 6215 | return ret; | 6218 | return ret; |
| 6216 | } | 6219 | } |
| 6217 | 6220 | ||
| 6218 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
| 6219 | { | ||
| 6220 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 6221 | |||
| 6222 | /* Reset flags back to the 'unknown' status so that they | ||
| 6223 | * will be correctly set on the initial modeset. | ||
| 6224 | */ | ||
| 6225 | intel_crtc->dpms_mode = -1; | ||
| 6226 | } | ||
| 6227 | |||
| 6228 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
| 6229 | .dpms = intel_crtc_dpms, | ||
| 6230 | .mode_fixup = intel_crtc_mode_fixup, | ||
| 6231 | .mode_set = intel_crtc_mode_set, | ||
| 6232 | .mode_set_base = intel_pipe_set_base, | ||
| 6233 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | ||
| 6234 | .load_lut = intel_crtc_load_lut, | ||
| 6235 | .disable = intel_crtc_disable, | ||
| 6236 | }; | ||
| 6237 | |||
| 6238 | static const struct drm_crtc_funcs intel_crtc_funcs = { | ||
| 6239 | .reset = intel_crtc_reset, | ||
| 6240 | .cursor_set = intel_crtc_cursor_set, | ||
| 6241 | .cursor_move = intel_crtc_cursor_move, | ||
| 6242 | .gamma_set = intel_crtc_gamma_set, | ||
| 6243 | .set_config = drm_crtc_helper_set_config, | ||
| 6244 | .destroy = intel_crtc_destroy, | ||
| 6245 | .page_flip = intel_crtc_page_flip, | ||
| 6246 | }; | ||
| 6247 | |||
| 6248 | static void intel_sanitize_modesetting(struct drm_device *dev, | 6221 | static void intel_sanitize_modesetting(struct drm_device *dev, |
| 6249 | int pipe, int plane) | 6222 | int pipe, int plane) |
| 6250 | { | 6223 | { |
| @@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev, | |||
| 6281 | intel_disable_pipe(dev_priv, pipe); | 6254 | intel_disable_pipe(dev_priv, pipe); |
| 6282 | } | 6255 | } |
| 6283 | 6256 | ||
| 6257 | static void intel_crtc_reset(struct drm_crtc *crtc) | ||
| 6258 | { | ||
| 6259 | struct drm_device *dev = crtc->dev; | ||
| 6260 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
| 6261 | |||
| 6262 | /* Reset flags back to the 'unknown' status so that they | ||
| 6263 | * will be correctly set on the initial modeset. | ||
| 6264 | */ | ||
| 6265 | intel_crtc->dpms_mode = -1; | ||
| 6266 | |||
| 6267 | /* We need to fix up any BIOS configuration that conflicts with | ||
| 6268 | * our expectations. | ||
| 6269 | */ | ||
| 6270 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
| 6271 | } | ||
| 6272 | |||
| 6273 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | ||
| 6274 | .dpms = intel_crtc_dpms, | ||
| 6275 | .mode_fixup = intel_crtc_mode_fixup, | ||
| 6276 | .mode_set = intel_crtc_mode_set, | ||
| 6277 | .mode_set_base = intel_pipe_set_base, | ||
| 6278 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | ||
| 6279 | .load_lut = intel_crtc_load_lut, | ||
| 6280 | .disable = intel_crtc_disable, | ||
| 6281 | }; | ||
| 6282 | |||
| 6283 | static const struct drm_crtc_funcs intel_crtc_funcs = { | ||
| 6284 | .reset = intel_crtc_reset, | ||
| 6285 | .cursor_set = intel_crtc_cursor_set, | ||
| 6286 | .cursor_move = intel_crtc_cursor_move, | ||
| 6287 | .gamma_set = intel_crtc_gamma_set, | ||
| 6288 | .set_config = drm_crtc_helper_set_config, | ||
| 6289 | .destroy = intel_crtc_destroy, | ||
| 6290 | .page_flip = intel_crtc_page_flip, | ||
| 6291 | }; | ||
| 6292 | |||
| 6284 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 6293 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
| 6285 | { | 6294 | { |
| 6286 | drm_i915_private_t *dev_priv = dev->dev_private; | 6295 | drm_i915_private_t *dev_priv = dev->dev_private; |
| @@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
| 6330 | 6339 | ||
| 6331 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | 6340 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, |
| 6332 | (unsigned long)intel_crtc); | 6341 | (unsigned long)intel_crtc); |
| 6333 | |||
| 6334 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
| 6335 | } | 6342 | } |
| 6336 | 6343 | ||
| 6337 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 6344 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 4256b8ef3947..6b22c1dcc015 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
| @@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 1151 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); | 1151 | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); |
| 1152 | { | 1152 | { |
| 1153 | int pipeconf_reg = PIPECONF(pipe); | 1153 | int pipeconf_reg = PIPECONF(pipe); |
| 1154 | int dspcntr_reg = DSPCNTR(pipe); | 1154 | int dspcntr_reg = DSPCNTR(intel_crtc->plane); |
| 1155 | int pipeconf = I915_READ(pipeconf_reg); | 1155 | int pipeconf = I915_READ(pipeconf_reg); |
| 1156 | int dspcntr = I915_READ(dspcntr_reg); | 1156 | int dspcntr = I915_READ(dspcntr_reg); |
| 1157 | int dspbase_reg = DSPADDR(pipe); | 1157 | int dspbase_reg = DSPADDR(intel_crtc->plane); |
| 1158 | int xpos = 0x0, ypos = 0x0; | 1158 | int xpos = 0x0, ypos = 0x0; |
| 1159 | unsigned int xsize, ysize; | 1159 | unsigned int xsize, ysize; |
| 1160 | /* Pipe must be off here */ | 1160 | /* Pipe must be off here */ |
| @@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
| 1378 | if (type < 0) | 1378 | if (type < 0) |
| 1379 | return connector_status_disconnected; | 1379 | return connector_status_disconnected; |
| 1380 | 1380 | ||
| 1381 | intel_tv->type = type; | ||
| 1381 | intel_tv_find_better_format(connector); | 1382 | intel_tv_find_better_format(connector); |
| 1383 | |||
| 1382 | return connector_status_connected; | 1384 | return connector_status_connected; |
| 1383 | } | 1385 | } |
| 1384 | 1386 | ||
| @@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev) | |||
| 1670 | * | 1672 | * |
| 1671 | * More recent chipsets favour HDMI rather than integrated S-Video. | 1673 | * More recent chipsets favour HDMI rather than integrated S-Video. |
| 1672 | */ | 1674 | */ |
| 1673 | connector->polled = | 1675 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
| 1674 | DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
| 1675 | 1676 | ||
| 1676 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1677 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
| 1677 | DRM_MODE_CONNECTOR_SVIDEO); | 1678 | DRM_MODE_CONNECTOR_SVIDEO); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index ce38e97b9428..568caedd7216 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
| @@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
| 83 | return ret; | 83 | return ret; |
| 84 | 84 | ||
| 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
| 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, | 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, |
| 87 | &chan->m2mf_ntfy); | 87 | &chan->m2mf_ntfy); |
| 88 | if (ret) | 88 | if (ret) |
| 89 | return ret; | 89 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 856d56a98d1e..a76514a209b3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -682,6 +682,9 @@ struct drm_nouveau_private { | |||
| 682 | /* For PFIFO and PGRAPH. */ | 682 | /* For PFIFO and PGRAPH. */ |
| 683 | spinlock_t context_switch_lock; | 683 | spinlock_t context_switch_lock; |
| 684 | 684 | ||
| 685 | /* VM/PRAMIN flush, legacy PRAMIN aperture */ | ||
| 686 | spinlock_t vm_lock; | ||
| 687 | |||
| 685 | /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ | 688 | /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ |
| 686 | struct nouveau_ramht *ramht; | 689 | struct nouveau_ramht *ramht; |
| 687 | struct nouveau_gpuobj *ramfc; | 690 | struct nouveau_gpuobj *ramfc; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 889c4454682e..39aee6d4daf8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
| 181 | OUT_RING (chan, 0); | 181 | OUT_RING (chan, 0); |
| 182 | } | 182 | } |
| 183 | 183 | ||
| 184 | nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); | 184 | nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff); |
| 185 | FIRE_RING(chan); | 185 | FIRE_RING(chan); |
| 186 | mutex_unlock(&chan->mutex); | 186 | mutex_unlock(&chan->mutex); |
| 187 | 187 | ||
| 188 | ret = -EBUSY; | 188 | ret = -EBUSY; |
| 189 | for (i = 0; i < 100000; i++) { | 189 | for (i = 0; i < 100000; i++) { |
| 190 | if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { | 190 | if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) { |
| 191 | ret = 0; | 191 | ret = 0; |
| 192 | break; | 192 | break; |
| 193 | } | 193 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 78f467fe30be..5045f8b921d6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev) | |||
| 398 | dma_bits = 40; | 398 | dma_bits = 40; |
| 399 | } else | 399 | } else |
| 400 | if (drm_pci_device_is_pcie(dev) && | 400 | if (drm_pci_device_is_pcie(dev) && |
| 401 | dev_priv->chipset != 0x40 && | 401 | dev_priv->chipset > 0x40 && |
| 402 | dev_priv->chipset != 0x45) { | 402 | dev_priv->chipset != 0x45) { |
| 403 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) | 403 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) |
| 404 | dma_bits = 39; | 404 | dma_bits = 39; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 7ba3fc0b30c1..5b39718ae1f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
| @@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) | |||
| 35 | { | 35 | { |
| 36 | struct drm_device *dev = chan->dev; | 36 | struct drm_device *dev = chan->dev; |
| 37 | struct nouveau_bo *ntfy = NULL; | 37 | struct nouveau_bo *ntfy = NULL; |
| 38 | uint32_t flags; | 38 | uint32_t flags, ttmpl; |
| 39 | int ret; | 39 | int ret; |
| 40 | 40 | ||
| 41 | if (nouveau_vram_notify) | 41 | if (nouveau_vram_notify) { |
| 42 | flags = NOUVEAU_GEM_DOMAIN_VRAM; | 42 | flags = NOUVEAU_GEM_DOMAIN_VRAM; |
| 43 | else | 43 | ttmpl = TTM_PL_FLAG_VRAM; |
| 44 | } else { | ||
| 44 | flags = NOUVEAU_GEM_DOMAIN_GART; | 45 | flags = NOUVEAU_GEM_DOMAIN_GART; |
| 46 | ttmpl = TTM_PL_FLAG_TT; | ||
| 47 | } | ||
| 45 | 48 | ||
| 46 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); | 49 | ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); |
| 47 | if (ret) | 50 | if (ret) |
| 48 | return ret; | 51 | return ret; |
| 49 | 52 | ||
| 50 | ret = nouveau_bo_pin(ntfy, flags); | 53 | ret = nouveau_bo_pin(ntfy, ttmpl); |
| 51 | if (ret) | 54 | if (ret) |
| 52 | goto out_err; | 55 | goto out_err; |
| 53 | 56 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 4f00c87ed86e..67a16e01ffa6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c | |||
| @@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset) | |||
| 1039 | { | 1039 | { |
| 1040 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | 1040 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
| 1041 | struct drm_device *dev = gpuobj->dev; | 1041 | struct drm_device *dev = gpuobj->dev; |
| 1042 | unsigned long flags; | ||
| 1042 | 1043 | ||
| 1043 | if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { | 1044 | if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { |
| 1044 | u64 ptr = gpuobj->vinst + offset; | 1045 | u64 ptr = gpuobj->vinst + offset; |
| 1045 | u32 base = ptr >> 16; | 1046 | u32 base = ptr >> 16; |
| 1046 | u32 val; | 1047 | u32 val; |
| 1047 | 1048 | ||
| 1048 | spin_lock(&dev_priv->ramin_lock); | 1049 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 1049 | if (dev_priv->ramin_base != base) { | 1050 | if (dev_priv->ramin_base != base) { |
| 1050 | dev_priv->ramin_base = base; | 1051 | dev_priv->ramin_base = base; |
| 1051 | nv_wr32(dev, 0x001700, dev_priv->ramin_base); | 1052 | nv_wr32(dev, 0x001700, dev_priv->ramin_base); |
| 1052 | } | 1053 | } |
| 1053 | val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); | 1054 | val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); |
| 1054 | spin_unlock(&dev_priv->ramin_lock); | 1055 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 1055 | return val; | 1056 | return val; |
| 1056 | } | 1057 | } |
| 1057 | 1058 | ||
| @@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val) | |||
| 1063 | { | 1064 | { |
| 1064 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; | 1065 | struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; |
| 1065 | struct drm_device *dev = gpuobj->dev; | 1066 | struct drm_device *dev = gpuobj->dev; |
| 1067 | unsigned long flags; | ||
| 1066 | 1068 | ||
| 1067 | if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { | 1069 | if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { |
| 1068 | u64 ptr = gpuobj->vinst + offset; | 1070 | u64 ptr = gpuobj->vinst + offset; |
| 1069 | u32 base = ptr >> 16; | 1071 | u32 base = ptr >> 16; |
| 1070 | 1072 | ||
| 1071 | spin_lock(&dev_priv->ramin_lock); | 1073 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 1072 | if (dev_priv->ramin_base != base) { | 1074 | if (dev_priv->ramin_base != base) { |
| 1073 | dev_priv->ramin_base = base; | 1075 | dev_priv->ramin_base = base; |
| 1074 | nv_wr32(dev, 0x001700, dev_priv->ramin_base); | 1076 | nv_wr32(dev, 0x001700, dev_priv->ramin_base); |
| 1075 | } | 1077 | } |
| 1076 | nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); | 1078 | nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); |
| 1077 | spin_unlock(&dev_priv->ramin_lock); | 1079 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 1078 | return; | 1080 | return; |
| 1079 | } | 1081 | } |
| 1080 | 1082 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a33fe4019286..4bce801bc588 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c | |||
| @@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, | |||
| 55 | be->func->clear(be); | 55 | be->func->clear(be); |
| 56 | return -EFAULT; | 56 | return -EFAULT; |
| 57 | } | 57 | } |
| 58 | nvbe->ttm_alloced[nvbe->nr_pages] = false; | ||
| 58 | } | 59 | } |
| 59 | 60 | ||
| 60 | nvbe->nr_pages++; | 61 | nvbe->nr_pages++; |
| @@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
| 427 | u32 aper_size, align; | 428 | u32 aper_size, align; |
| 428 | int ret; | 429 | int ret; |
| 429 | 430 | ||
| 430 | if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev)) | 431 | if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev)) |
| 431 | aper_size = 512 * 1024 * 1024; | 432 | aper_size = 512 * 1024 * 1024; |
| 432 | else | 433 | else |
| 433 | aper_size = 64 * 1024 * 1024; | 434 | aper_size = 64 * 1024 * 1024; |
| @@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev) | |||
| 457 | dev_priv->gart_info.func = &nv50_sgdma_backend; | 458 | dev_priv->gart_info.func = &nv50_sgdma_backend; |
| 458 | } else | 459 | } else |
| 459 | if (drm_pci_device_is_pcie(dev) && | 460 | if (drm_pci_device_is_pcie(dev) && |
| 460 | dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) { | 461 | dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { |
| 461 | if (nv44_graph_class(dev)) { | 462 | if (nv44_graph_class(dev)) { |
| 462 | dev_priv->gart_info.func = &nv44_sgdma_backend; | 463 | dev_priv->gart_info.func = &nv44_sgdma_backend; |
| 463 | align = 512 * 1024; | 464 | align = 512 * 1024; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 6e2b1a6caa2d..a30adec5beaa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev) | |||
| 608 | spin_lock_init(&dev_priv->channels.lock); | 608 | spin_lock_init(&dev_priv->channels.lock); |
| 609 | spin_lock_init(&dev_priv->tile.lock); | 609 | spin_lock_init(&dev_priv->tile.lock); |
| 610 | spin_lock_init(&dev_priv->context_switch_lock); | 610 | spin_lock_init(&dev_priv->context_switch_lock); |
| 611 | spin_lock_init(&dev_priv->vm_lock); | ||
| 611 | 612 | ||
| 612 | /* Make the CRTCs and I2C buses accessible */ | 613 | /* Make the CRTCs and I2C buses accessible */ |
| 613 | ret = engine->display.early_init(dev); | 614 | ret = engine->display.early_init(dev); |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index a6f8aa651fc6..4f95a1e5822e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -404,23 +404,25 @@ void | |||
| 404 | nv50_instmem_flush(struct drm_device *dev) | 404 | nv50_instmem_flush(struct drm_device *dev) |
| 405 | { | 405 | { |
| 406 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 406 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 407 | unsigned long flags; | ||
| 407 | 408 | ||
| 408 | spin_lock(&dev_priv->ramin_lock); | 409 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 409 | nv_wr32(dev, 0x00330c, 0x00000001); | 410 | nv_wr32(dev, 0x00330c, 0x00000001); |
| 410 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) | 411 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) |
| 411 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 412 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
| 412 | spin_unlock(&dev_priv->ramin_lock); | 413 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 413 | } | 414 | } |
| 414 | 415 | ||
| 415 | void | 416 | void |
| 416 | nv84_instmem_flush(struct drm_device *dev) | 417 | nv84_instmem_flush(struct drm_device *dev) |
| 417 | { | 418 | { |
| 418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 419 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 420 | unsigned long flags; | ||
| 419 | 421 | ||
| 420 | spin_lock(&dev_priv->ramin_lock); | 422 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 421 | nv_wr32(dev, 0x070000, 0x00000001); | 423 | nv_wr32(dev, 0x070000, 0x00000001); |
| 422 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) | 424 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) |
| 423 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 425 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
| 424 | spin_unlock(&dev_priv->ramin_lock); | 426 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 425 | } | 427 | } |
| 426 | 428 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 4fd3432b5b8d..6c2694490741 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
| @@ -174,10 +174,11 @@ void | |||
| 174 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | 174 | nv50_vm_flush_engine(struct drm_device *dev, int engine) |
| 175 | { | 175 | { |
| 176 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 176 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 177 | unsigned long flags; | ||
| 177 | 178 | ||
| 178 | spin_lock(&dev_priv->ramin_lock); | 179 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 179 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | 180 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); |
| 180 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | 181 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) |
| 181 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | 182 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); |
| 182 | spin_unlock(&dev_priv->ramin_lock); | 183 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 183 | } | 184 | } |
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index a0a2a0277f73..a179e6c55afb 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
| @@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm) | |||
| 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
| 105 | struct drm_device *dev = vm->dev; | 105 | struct drm_device *dev = vm->dev; |
| 106 | struct nouveau_vm_pgd *vpgd; | 106 | struct nouveau_vm_pgd *vpgd; |
| 107 | unsigned long flags; | ||
| 107 | u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; | 108 | u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; |
| 108 | 109 | ||
| 109 | pinstmem->flush(vm->dev); | 110 | pinstmem->flush(vm->dev); |
| 110 | 111 | ||
| 111 | spin_lock(&dev_priv->ramin_lock); | 112 | spin_lock_irqsave(&dev_priv->vm_lock, flags); |
| 112 | list_for_each_entry(vpgd, &vm->pgd_list, head) { | 113 | list_for_each_entry(vpgd, &vm->pgd_list, head) { |
| 113 | /* looks like maybe a "free flush slots" counter, the | 114 | /* looks like maybe a "free flush slots" counter, the |
| 114 | * faster you write to 0x100cbc to more it decreases | 115 | * faster you write to 0x100cbc to more it decreases |
| @@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm) | |||
| 125 | nv_rd32(dev, 0x100c80), engine); | 126 | nv_rd32(dev, 0x100c80), engine); |
| 126 | } | 127 | } |
| 127 | } | 128 | } |
| 128 | spin_unlock(&dev_priv->ramin_lock); | 129 | spin_unlock_irqrestore(&dev_priv->vm_lock, flags); |
| 129 | } | 130 | } |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d71d375149f8..7bd745689097 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
| 135 | case ATOM_IIO_MOVE_INDEX: | 135 | case ATOM_IIO_MOVE_INDEX: |
| 136 | temp &= | 136 | temp &= |
| 137 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 137 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
| 138 | CU8(base + 2)); | 138 | CU8(base + 3)); |
| 139 | temp |= | 139 | temp |= |
| 140 | ((index >> CU8(base + 2)) & | 140 | ((index >> CU8(base + 2)) & |
| 141 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | 141 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + |
| @@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
| 145 | case ATOM_IIO_MOVE_DATA: | 145 | case ATOM_IIO_MOVE_DATA: |
| 146 | temp &= | 146 | temp &= |
| 147 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 147 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
| 148 | CU8(base + 2)); | 148 | CU8(base + 3)); |
| 149 | temp |= | 149 | temp |= |
| 150 | ((data >> CU8(base + 2)) & | 150 | ((data >> CU8(base + 2)) & |
| 151 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + | 151 | (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + |
| @@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
| 155 | case ATOM_IIO_MOVE_ATTR: | 155 | case ATOM_IIO_MOVE_ATTR: |
| 156 | temp &= | 156 | temp &= |
| 157 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << | 157 | ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << |
| 158 | CU8(base + 2)); | 158 | CU8(base + 3)); |
| 159 | temp |= | 159 | temp |= |
| 160 | ((ctx-> | 160 | ((ctx-> |
| 161 | io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - | 161 | io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9d516a8c4dfa..529a3a704731 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 532 | else | 532 | else |
| 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
| 534 | 534 | ||
| 535 | if ((rdev->family == CHIP_R600) || | 535 | if (rdev->family < CHIP_RV770) |
| 536 | (rdev->family == CHIP_RV610) || | ||
| 537 | (rdev->family == CHIP_RV630) || | ||
| 538 | (rdev->family == CHIP_RV670)) | ||
| 539 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | 536 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; |
| 540 | } else { | 537 | } else { |
| 541 | pll->flags |= RADEON_PLL_LEGACY; | 538 | pll->flags |= RADEON_PLL_LEGACY; |
| @@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 565 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 562 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 566 | if (ss_enabled) { | 563 | if (ss_enabled) { |
| 567 | if (ss->refdiv) { | 564 | if (ss->refdiv) { |
| 568 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
| 569 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 565 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
| 570 | pll->reference_div = ss->refdiv; | 566 | pll->reference_div = ss->refdiv; |
| 571 | if (ASIC_IS_AVIVO(rdev)) | 567 | if (ASIC_IS_AVIVO(rdev)) |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 3453910ee0f3..e9bc135d9189 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -353,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, | |||
| 353 | struct drm_display_mode *mode, | 353 | struct drm_display_mode *mode, |
| 354 | struct drm_display_mode *other_mode) | 354 | struct drm_display_mode *other_mode) |
| 355 | { | 355 | { |
| 356 | u32 tmp = 0; | 356 | u32 tmp; |
| 357 | /* | 357 | /* |
| 358 | * Line Buffer Setup | 358 | * Line Buffer Setup |
| 359 | * There are 3 line buffers, each one shared by 2 display controllers. | 359 | * There are 3 line buffers, each one shared by 2 display controllers. |
| @@ -363,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, | |||
| 363 | * first display controller | 363 | * first display controller |
| 364 | * 0 - first half of lb (3840 * 2) | 364 | * 0 - first half of lb (3840 * 2) |
| 365 | * 1 - first 3/4 of lb (5760 * 2) | 365 | * 1 - first 3/4 of lb (5760 * 2) |
| 366 | * 2 - whole lb (7680 * 2) | 366 | * 2 - whole lb (7680 * 2), other crtc must be disabled |
| 367 | * 3 - first 1/4 of lb (1920 * 2) | 367 | * 3 - first 1/4 of lb (1920 * 2) |
| 368 | * second display controller | 368 | * second display controller |
| 369 | * 4 - second half of lb (3840 * 2) | 369 | * 4 - second half of lb (3840 * 2) |
| 370 | * 5 - second 3/4 of lb (5760 * 2) | 370 | * 5 - second 3/4 of lb (5760 * 2) |
| 371 | * 6 - whole lb (7680 * 2) | 371 | * 6 - whole lb (7680 * 2), other crtc must be disabled |
| 372 | * 7 - last 1/4 of lb (1920 * 2) | 372 | * 7 - last 1/4 of lb (1920 * 2) |
| 373 | */ | 373 | */ |
| 374 | if (mode && other_mode) { | 374 | /* this can get tricky if we have two large displays on a paired group |
| 375 | if (mode->hdisplay > other_mode->hdisplay) { | 375 | * of crtcs. Ideally for multiple large displays we'd assign them to |
| 376 | if (mode->hdisplay > 2560) | 376 | * non-linked crtcs for maximum line buffer allocation. |
| 377 | tmp = 1; /* 3/4 */ | 377 | */ |
| 378 | else | 378 | if (radeon_crtc->base.enabled && mode) { |
| 379 | tmp = 0; /* 1/2 */ | 379 | if (other_mode) |
| 380 | } else if (other_mode->hdisplay > mode->hdisplay) { | ||
| 381 | if (other_mode->hdisplay > 2560) | ||
| 382 | tmp = 3; /* 1/4 */ | ||
| 383 | else | ||
| 384 | tmp = 0; /* 1/2 */ | ||
| 385 | } else | ||
| 386 | tmp = 0; /* 1/2 */ | 380 | tmp = 0; /* 1/2 */ |
| 387 | } else if (mode) | 381 | else |
| 388 | tmp = 2; /* whole */ | 382 | tmp = 2; /* whole */ |
| 389 | else if (other_mode) | 383 | } else |
| 390 | tmp = 3; /* 1/4 */ | 384 | tmp = 0; |
| 391 | 385 | ||
| 392 | /* second controller of the pair uses second half of the lb */ | 386 | /* second controller of the pair uses second half of the lb */ |
| 393 | if (radeon_crtc->crtc_id % 2) | 387 | if (radeon_crtc->crtc_id % 2) |
| 394 | tmp += 4; | 388 | tmp += 4; |
| 395 | WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); | 389 | WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); |
| 396 | 390 | ||
| 397 | switch (tmp) { | 391 | if (radeon_crtc->base.enabled && mode) { |
| 398 | case 0: | 392 | switch (tmp) { |
| 399 | case 4: | 393 | case 0: |
| 400 | default: | 394 | case 4: |
| 401 | if (ASIC_IS_DCE5(rdev)) | 395 | default: |
| 402 | return 4096 * 2; | 396 | if (ASIC_IS_DCE5(rdev)) |
| 403 | else | 397 | return 4096 * 2; |
| 404 | return 3840 * 2; | 398 | else |
| 405 | case 1: | 399 | return 3840 * 2; |
| 406 | case 5: | 400 | case 1: |
| 407 | if (ASIC_IS_DCE5(rdev)) | 401 | case 5: |
| 408 | return 6144 * 2; | 402 | if (ASIC_IS_DCE5(rdev)) |
| 409 | else | 403 | return 6144 * 2; |
| 410 | return 5760 * 2; | 404 | else |
| 411 | case 2: | 405 | return 5760 * 2; |
| 412 | case 6: | 406 | case 2: |
| 413 | if (ASIC_IS_DCE5(rdev)) | 407 | case 6: |
| 414 | return 8192 * 2; | 408 | if (ASIC_IS_DCE5(rdev)) |
| 415 | else | 409 | return 8192 * 2; |
| 416 | return 7680 * 2; | 410 | else |
| 417 | case 3: | 411 | return 7680 * 2; |
| 418 | case 7: | 412 | case 3: |
| 419 | if (ASIC_IS_DCE5(rdev)) | 413 | case 7: |
| 420 | return 2048 * 2; | 414 | if (ASIC_IS_DCE5(rdev)) |
| 421 | else | 415 | return 2048 * 2; |
| 422 | return 1920 * 2; | 416 | else |
| 417 | return 1920 * 2; | ||
| 418 | } | ||
| 423 | } | 419 | } |
| 420 | |||
| 421 | /* controller not enabled, so no lb used */ | ||
| 422 | return 0; | ||
| 424 | } | 423 | } |
| 425 | 424 | ||
| 426 | static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) | 425 | static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev) |
| @@ -2581,7 +2580,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
| 2581 | u32 wptr, tmp; | 2580 | u32 wptr, tmp; |
| 2582 | 2581 | ||
| 2583 | if (rdev->wb.enabled) | 2582 | if (rdev->wb.enabled) |
| 2584 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; | 2583 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); |
| 2585 | else | 2584 | else |
| 2586 | wptr = RREG32(IH_RB_WPTR); | 2585 | wptr = RREG32(IH_RB_WPTR); |
| 2587 | 2586 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 15d58292677a..6f27593901c7 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -3231,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | |||
| 3231 | u32 wptr, tmp; | 3231 | u32 wptr, tmp; |
| 3232 | 3232 | ||
| 3233 | if (rdev->wb.enabled) | 3233 | if (rdev->wb.enabled) |
| 3234 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; | 3234 | wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); |
| 3235 | else | 3235 | else |
| 3236 | wptr = RREG32(IH_RB_WPTR); | 3236 | wptr = RREG32(IH_RB_WPTR); |
| 3237 | 3237 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2ef6d5135064..5f45fa12bb8b 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1199 | if (router->ddc_valid || router->cd_valid) { | 1199 | if (router->ddc_valid || router->cd_valid) { |
| 1200 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1200 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
| 1201 | if (!radeon_connector->router_bus) | 1201 | if (!radeon_connector->router_bus) |
| 1202 | goto failed; | 1202 | DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); |
| 1203 | } | 1203 | } |
| 1204 | switch (connector_type) { | 1204 | switch (connector_type) { |
| 1205 | case DRM_MODE_CONNECTOR_VGA: | 1205 | case DRM_MODE_CONNECTOR_VGA: |
| @@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1208 | if (i2c_bus->valid) { | 1208 | if (i2c_bus->valid) { |
| 1209 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1209 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1210 | if (!radeon_connector->ddc_bus) | 1210 | if (!radeon_connector->ddc_bus) |
| 1211 | goto failed; | 1211 | DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1212 | } | 1212 | } |
| 1213 | radeon_connector->dac_load_detect = true; | 1213 | radeon_connector->dac_load_detect = true; |
| 1214 | drm_connector_attach_property(&radeon_connector->base, | 1214 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1226 | if (i2c_bus->valid) { | 1226 | if (i2c_bus->valid) { |
| 1227 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1227 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1228 | if (!radeon_connector->ddc_bus) | 1228 | if (!radeon_connector->ddc_bus) |
| 1229 | goto failed; | 1229 | DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1230 | } | 1230 | } |
| 1231 | radeon_connector->dac_load_detect = true; | 1231 | radeon_connector->dac_load_detect = true; |
| 1232 | drm_connector_attach_property(&radeon_connector->base, | 1232 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1249 | if (i2c_bus->valid) { | 1249 | if (i2c_bus->valid) { |
| 1250 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1250 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1251 | if (!radeon_connector->ddc_bus) | 1251 | if (!radeon_connector->ddc_bus) |
| 1252 | goto failed; | 1252 | DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1253 | } | 1253 | } |
| 1254 | subpixel_order = SubPixelHorizontalRGB; | 1254 | subpixel_order = SubPixelHorizontalRGB; |
| 1255 | drm_connector_attach_property(&radeon_connector->base, | 1255 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1290 | if (i2c_bus->valid) { | 1290 | if (i2c_bus->valid) { |
| 1291 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1291 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1292 | if (!radeon_connector->ddc_bus) | 1292 | if (!radeon_connector->ddc_bus) |
| 1293 | goto failed; | 1293 | DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1294 | } | 1294 | } |
| 1295 | drm_connector_attach_property(&radeon_connector->base, | 1295 | drm_connector_attach_property(&radeon_connector->base, |
| 1296 | rdev->mode_info.coherent_mode_property, | 1296 | rdev->mode_info.coherent_mode_property, |
| @@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1329 | else | 1329 | else |
| 1330 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | 1330 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); |
| 1331 | if (!radeon_dig_connector->dp_i2c_bus) | 1331 | if (!radeon_dig_connector->dp_i2c_bus) |
| 1332 | goto failed; | 1332 | DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n"); |
| 1333 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1333 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1334 | if (!radeon_connector->ddc_bus) | 1334 | if (!radeon_connector->ddc_bus) |
| 1335 | goto failed; | 1335 | DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1336 | } | 1336 | } |
| 1337 | subpixel_order = SubPixelHorizontalRGB; | 1337 | subpixel_order = SubPixelHorizontalRGB; |
| 1338 | drm_connector_attach_property(&radeon_connector->base, | 1338 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1381 | if (i2c_bus->valid) { | 1381 | if (i2c_bus->valid) { |
| 1382 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1382 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1383 | if (!radeon_connector->ddc_bus) | 1383 | if (!radeon_connector->ddc_bus) |
| 1384 | goto failed; | 1384 | DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1385 | } | 1385 | } |
| 1386 | drm_connector_attach_property(&radeon_connector->base, | 1386 | drm_connector_attach_property(&radeon_connector->base, |
| 1387 | dev->mode_config.scaling_mode_property, | 1387 | dev->mode_config.scaling_mode_property, |
| @@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1457 | if (i2c_bus->valid) { | 1457 | if (i2c_bus->valid) { |
| 1458 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1458 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1459 | if (!radeon_connector->ddc_bus) | 1459 | if (!radeon_connector->ddc_bus) |
| 1460 | goto failed; | 1460 | DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1461 | } | 1461 | } |
| 1462 | radeon_connector->dac_load_detect = true; | 1462 | radeon_connector->dac_load_detect = true; |
| 1463 | drm_connector_attach_property(&radeon_connector->base, | 1463 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1475 | if (i2c_bus->valid) { | 1475 | if (i2c_bus->valid) { |
| 1476 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1476 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1477 | if (!radeon_connector->ddc_bus) | 1477 | if (!radeon_connector->ddc_bus) |
| 1478 | goto failed; | 1478 | DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1479 | } | 1479 | } |
| 1480 | radeon_connector->dac_load_detect = true; | 1480 | radeon_connector->dac_load_detect = true; |
| 1481 | drm_connector_attach_property(&radeon_connector->base, | 1481 | drm_connector_attach_property(&radeon_connector->base, |
| @@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1493 | if (i2c_bus->valid) { | 1493 | if (i2c_bus->valid) { |
| 1494 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1494 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1495 | if (!radeon_connector->ddc_bus) | 1495 | if (!radeon_connector->ddc_bus) |
| 1496 | goto failed; | 1496 | DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1497 | } | 1497 | } |
| 1498 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { | 1498 | if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
| 1499 | radeon_connector->dac_load_detect = true; | 1499 | radeon_connector->dac_load_detect = true; |
| @@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1538 | if (i2c_bus->valid) { | 1538 | if (i2c_bus->valid) { |
| 1539 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); | 1539 | radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); |
| 1540 | if (!radeon_connector->ddc_bus) | 1540 | if (!radeon_connector->ddc_bus) |
| 1541 | goto failed; | 1541 | DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
| 1542 | } | 1542 | } |
| 1543 | drm_connector_attach_property(&radeon_connector->base, | 1543 | drm_connector_attach_property(&radeon_connector->base, |
| 1544 | dev->mode_config.scaling_mode_property, | 1544 | dev->mode_config.scaling_mode_property, |
| @@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1567 | radeon_legacy_backlight_init(radeon_encoder, connector); | 1567 | radeon_legacy_backlight_init(radeon_encoder, connector); |
| 1568 | } | 1568 | } |
| 1569 | } | 1569 | } |
| 1570 | return; | ||
| 1571 | |||
| 1572 | failed: | ||
| 1573 | drm_connector_cleanup(connector); | ||
| 1574 | kfree(connector); | ||
| 1575 | } | 1570 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index ccbabf734a61..983cbac75af0 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) | |||
| 1096 | if (!radeon_connector->router.ddc_valid) | 1096 | if (!radeon_connector->router.ddc_valid) |
| 1097 | return; | 1097 | return; |
| 1098 | 1098 | ||
| 1099 | if (!radeon_connector->router_bus) | ||
| 1100 | return; | ||
| 1101 | |||
| 1099 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1102 | radeon_i2c_get_byte(radeon_connector->router_bus, |
| 1100 | radeon_connector->router.i2c_addr, | 1103 | radeon_connector->router.i2c_addr, |
| 1101 | 0x3, &val); | 1104 | 0x3, &val); |
| @@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) | |||
| 1121 | if (!radeon_connector->router.cd_valid) | 1124 | if (!radeon_connector->router.cd_valid) |
| 1122 | return; | 1125 | return; |
| 1123 | 1126 | ||
| 1127 | if (!radeon_connector->router_bus) | ||
| 1128 | return; | ||
| 1129 | |||
| 1124 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1130 | radeon_i2c_get_byte(radeon_connector->router_bus, |
| 1125 | radeon_connector->router.i2c_addr, | 1131 | radeon_connector->router.i2c_addr, |
| 1126 | 0x3, &val); | 1132 | 0x3, &val); |
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c index edfb92e41735..196ffafafd88 100644 --- a/drivers/hwmon/pmbus_core.c +++ b/drivers/hwmon/pmbus_core.c | |||
| @@ -139,7 +139,6 @@ struct pmbus_data { | |||
| 139 | * A single status register covers multiple attributes, | 139 | * A single status register covers multiple attributes, |
| 140 | * so we keep them all together. | 140 | * so we keep them all together. |
| 141 | */ | 141 | */ |
| 142 | u8 status_bits; | ||
| 143 | u8 status[PB_NUM_STATUS_REG]; | 142 | u8 status[PB_NUM_STATUS_REG]; |
| 144 | 143 | ||
| 145 | u8 currpage; | 144 | u8 currpage; |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index fd1e11799137..a5ec5a7cb381 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
| @@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive) | |||
| 1782 | ide_cd_read_toc(drive, &sense); | 1782 | ide_cd_read_toc(drive, &sense); |
| 1783 | g->fops = &idecd_ops; | 1783 | g->fops = &idecd_ops; |
| 1784 | g->flags |= GENHD_FL_REMOVABLE; | 1784 | g->flags |= GENHD_FL_REMOVABLE; |
| 1785 | g->events = DISK_EVENT_MEDIA_CHANGE; | ||
| 1786 | add_disk(g); | 1785 | add_disk(g); |
| 1787 | return 0; | 1786 | return 0; |
| 1788 | 1787 | ||
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 2a6bc50e8a41..02caa7dd51c8 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
| @@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) | |||
| 79 | return CDS_DRIVE_NOT_READY; | 79 | return CDS_DRIVE_NOT_READY; |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | /* | ||
| 83 | * ide-cd always generates media changed event if media is missing, which | ||
| 84 | * makes it impossible to use for proper event reporting, so disk->events | ||
| 85 | * is cleared to 0 and the following function is used only to trigger | ||
| 86 | * revalidation and never propagated to userland. | ||
| 87 | */ | ||
| 82 | unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, | 88 | unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, |
| 83 | unsigned int clearing, int slot_nr) | 89 | unsigned int clearing, int slot_nr) |
| 84 | { | 90 | { |
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index c4ffd4888939..70ea8763567d 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c | |||
| @@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk, | |||
| 298 | return 0; | 298 | return 0; |
| 299 | } | 299 | } |
| 300 | 300 | ||
| 301 | /* | ||
| 302 | * The following is used to force revalidation on the first open on | ||
| 303 | * removeable devices, and never gets reported to userland as | ||
| 304 | * genhd->events is 0. This is intended as removeable ide disk | ||
| 305 | * can't really detect MEDIA_CHANGE events. | ||
| 306 | */ | ||
| 301 | ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; | 307 | ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED; |
| 302 | drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; | 308 | drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED; |
| 303 | 309 | ||
| @@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive) | |||
| 413 | if (drive->dev_flags & IDE_DFLAG_REMOVABLE) | 419 | if (drive->dev_flags & IDE_DFLAG_REMOVABLE) |
| 414 | g->flags = GENHD_FL_REMOVABLE; | 420 | g->flags = GENHD_FL_REMOVABLE; |
| 415 | g->fops = &ide_gd_ops; | 421 | g->fops = &ide_gd_ops; |
| 416 | g->events = DISK_EVENT_MEDIA_CHANGE; | ||
| 417 | add_disk(g); | 422 | add_disk(g); |
| 418 | return 0; | 423 | return 0; |
| 419 | 424 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 6e853c61d87e..7d6f7f18a920 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -3170,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 3170 | mddev->layout = mddev->new_layout; | 3170 | mddev->layout = mddev->new_layout; |
| 3171 | mddev->chunk_sectors = mddev->new_chunk_sectors; | 3171 | mddev->chunk_sectors = mddev->new_chunk_sectors; |
| 3172 | mddev->delta_disks = 0; | 3172 | mddev->delta_disks = 0; |
| 3173 | mddev->degraded = 0; | ||
| 3173 | if (mddev->pers->sync_request == NULL) { | 3174 | if (mddev->pers->sync_request == NULL) { |
| 3174 | /* this is now an array without redundancy, so | 3175 | /* this is now an array without redundancy, so |
| 3175 | * it must always be in_sync | 3176 | * it must always be in_sync |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f301e6ae220c..49bf5f891435 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -5151,7 +5151,6 @@ static int run(mddev_t *mddev) | |||
| 5151 | 5151 | ||
| 5152 | mddev->queue->backing_dev_info.congested_data = mddev; | 5152 | mddev->queue->backing_dev_info.congested_data = mddev; |
| 5153 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 5153 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
| 5154 | mddev->queue->queue_lock = &conf->device_lock; | ||
| 5155 | 5154 | ||
| 5156 | chunk_size = mddev->chunk_sectors << 9; | 5155 | chunk_size = mddev->chunk_sectors << 9; |
| 5157 | blk_queue_io_min(mddev->queue, chunk_size); | 5156 | blk_queue_io_min(mddev->queue, chunk_size); |
| @@ -5679,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state) | |||
| 5679 | static void *raid45_takeover_raid0(mddev_t *mddev, int level) | 5678 | static void *raid45_takeover_raid0(mddev_t *mddev, int level) |
| 5680 | { | 5679 | { |
| 5681 | struct raid0_private_data *raid0_priv = mddev->private; | 5680 | struct raid0_private_data *raid0_priv = mddev->private; |
| 5681 | sector_t sectors; | ||
| 5682 | 5682 | ||
| 5683 | /* for raid0 takeover only one zone is supported */ | 5683 | /* for raid0 takeover only one zone is supported */ |
| 5684 | if (raid0_priv->nr_strip_zones > 1) { | 5684 | if (raid0_priv->nr_strip_zones > 1) { |
| @@ -5687,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level) | |||
| 5687 | return ERR_PTR(-EINVAL); | 5687 | return ERR_PTR(-EINVAL); |
| 5688 | } | 5688 | } |
| 5689 | 5689 | ||
| 5690 | sectors = raid0_priv->strip_zone[0].zone_end; | ||
| 5691 | sector_div(sectors, raid0_priv->strip_zone[0].nb_dev); | ||
| 5692 | mddev->dev_sectors = sectors; | ||
| 5690 | mddev->new_level = level; | 5693 | mddev->new_level = level; |
| 5691 | mddev->new_layout = ALGORITHM_PARITY_N; | 5694 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 5692 | mddev->new_chunk_sectors = mddev->chunk_sectors; | 5695 | mddev->new_chunk_sectors = mddev->chunk_sectors; |
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c index e3de0b8625cd..7581518ecfa2 100644 --- a/drivers/net/bna/bfa_ioc.c +++ b/drivers/net/bna/bfa_ioc.c | |||
| @@ -38,6 +38,8 @@ | |||
| 38 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) | 38 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) |
| 39 | #define bfa_ioc_notify_fail(__ioc) \ | 39 | #define bfa_ioc_notify_fail(__ioc) \ |
| 40 | ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) | 40 | ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) |
| 41 | #define bfa_ioc_sync_start(__ioc) \ | ||
| 42 | ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) | ||
| 41 | #define bfa_ioc_sync_join(__ioc) \ | 43 | #define bfa_ioc_sync_join(__ioc) \ |
| 42 | ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) | 44 | ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) |
| 43 | #define bfa_ioc_sync_leave(__ioc) \ | 45 | #define bfa_ioc_sync_leave(__ioc) \ |
| @@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) | |||
| 602 | switch (event) { | 604 | switch (event) { |
| 603 | case IOCPF_E_SEMLOCKED: | 605 | case IOCPF_E_SEMLOCKED: |
| 604 | if (bfa_ioc_firmware_lock(ioc)) { | 606 | if (bfa_ioc_firmware_lock(ioc)) { |
| 605 | if (bfa_ioc_sync_complete(ioc)) { | 607 | if (bfa_ioc_sync_start(ioc)) { |
| 606 | iocpf->retry_count = 0; | 608 | iocpf->retry_count = 0; |
| 607 | bfa_ioc_sync_join(ioc); | 609 | bfa_ioc_sync_join(ioc); |
| 608 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | 610 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); |
| @@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) | |||
| 1314 | * execution context (driver/bios) must match. | 1316 | * execution context (driver/bios) must match. |
| 1315 | */ | 1317 | */ |
| 1316 | static bool | 1318 | static bool |
| 1317 | bfa_ioc_fwver_valid(struct bfa_ioc *ioc) | 1319 | bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) |
| 1318 | { | 1320 | { |
| 1319 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; | 1321 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; |
| 1320 | 1322 | ||
| @@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc) | |||
| 1325 | if (fwhdr.signature != drv_fwhdr->signature) | 1327 | if (fwhdr.signature != drv_fwhdr->signature) |
| 1326 | return false; | 1328 | return false; |
| 1327 | 1329 | ||
| 1328 | if (fwhdr.exec != drv_fwhdr->exec) | 1330 | if (swab32(fwhdr.param) != boot_env) |
| 1329 | return false; | 1331 | return false; |
| 1330 | 1332 | ||
| 1331 | return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); | 1333 | return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); |
| @@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) | |||
| 1352 | { | 1354 | { |
| 1353 | enum bfi_ioc_state ioc_fwstate; | 1355 | enum bfi_ioc_state ioc_fwstate; |
| 1354 | bool fwvalid; | 1356 | bool fwvalid; |
| 1357 | u32 boot_env; | ||
| 1355 | 1358 | ||
| 1356 | ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); | 1359 | ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); |
| 1357 | 1360 | ||
| 1361 | boot_env = BFI_BOOT_LOADER_OS; | ||
| 1362 | |||
| 1358 | if (force) | 1363 | if (force) |
| 1359 | ioc_fwstate = BFI_IOC_UNINIT; | 1364 | ioc_fwstate = BFI_IOC_UNINIT; |
| 1360 | 1365 | ||
| @@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) | |||
| 1362 | * check if firmware is valid | 1367 | * check if firmware is valid |
| 1363 | */ | 1368 | */ |
| 1364 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? | 1369 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? |
| 1365 | false : bfa_ioc_fwver_valid(ioc); | 1370 | false : bfa_ioc_fwver_valid(ioc, boot_env); |
| 1366 | 1371 | ||
| 1367 | if (!fwvalid) { | 1372 | if (!fwvalid) { |
| 1368 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); | 1373 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env); |
| 1369 | return; | 1374 | return; |
| 1370 | } | 1375 | } |
| 1371 | 1376 | ||
| @@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) | |||
| 1396 | /** | 1401 | /** |
| 1397 | * Initialize the h/w for any other states. | 1402 | * Initialize the h/w for any other states. |
| 1398 | */ | 1403 | */ |
| 1399 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); | 1404 | bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env); |
| 1400 | } | 1405 | } |
| 1401 | 1406 | ||
| 1402 | void | 1407 | void |
| @@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc) | |||
| 1506 | */ | 1511 | */ |
| 1507 | static void | 1512 | static void |
| 1508 | bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, | 1513 | bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, |
| 1509 | u32 boot_param) | 1514 | u32 boot_env) |
| 1510 | { | 1515 | { |
| 1511 | u32 *fwimg; | 1516 | u32 *fwimg; |
| 1512 | u32 pgnum, pgoff; | 1517 | u32 pgnum, pgoff; |
| @@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, | |||
| 1558 | /* | 1563 | /* |
| 1559 | * Set boot type and boot param at the end. | 1564 | * Set boot type and boot param at the end. |
| 1560 | */ | 1565 | */ |
| 1561 | writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start) | 1566 | writel(boot_type, ((ioc->ioc_regs.smem_page_start) |
| 1562 | + (BFI_BOOT_TYPE_OFF))); | 1567 | + (BFI_BOOT_TYPE_OFF))); |
| 1563 | writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start) | 1568 | writel(boot_env, ((ioc->ioc_regs.smem_page_start) |
| 1564 | + (BFI_BOOT_PARAM_OFF))); | 1569 | + (BFI_BOOT_LOADER_OFF))); |
| 1565 | } | 1570 | } |
| 1566 | 1571 | ||
| 1567 | static void | 1572 | static void |
| @@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc) | |||
| 1721 | * as the entry vector. | 1726 | * as the entry vector. |
| 1722 | */ | 1727 | */ |
| 1723 | static void | 1728 | static void |
| 1724 | bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) | 1729 | bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env) |
| 1725 | { | 1730 | { |
| 1726 | void __iomem *rb; | 1731 | void __iomem *rb; |
| 1727 | 1732 | ||
| @@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) | |||
| 1734 | * Initialize IOC state of all functions on a chip reset. | 1739 | * Initialize IOC state of all functions on a chip reset. |
| 1735 | */ | 1740 | */ |
| 1736 | rb = ioc->pcidev.pci_bar_kva; | 1741 | rb = ioc->pcidev.pci_bar_kva; |
| 1737 | if (boot_param == BFI_BOOT_TYPE_MEMTEST) { | 1742 | if (boot_type == BFI_BOOT_TYPE_MEMTEST) { |
| 1738 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); | 1743 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); |
| 1739 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); | 1744 | writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); |
| 1740 | } else { | 1745 | } else { |
| @@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) | |||
| 1743 | } | 1748 | } |
| 1744 | 1749 | ||
| 1745 | bfa_ioc_msgflush(ioc); | 1750 | bfa_ioc_msgflush(ioc); |
| 1746 | bfa_ioc_download_fw(ioc, boot_type, boot_param); | 1751 | bfa_ioc_download_fw(ioc, boot_type, boot_env); |
| 1747 | 1752 | ||
| 1748 | /** | 1753 | /** |
| 1749 | * Enable interrupts just before starting LPU | 1754 | * Enable interrupts just before starting LPU |
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h index e4974bc24ef6..bd48abee781f 100644 --- a/drivers/net/bna/bfa_ioc.h +++ b/drivers/net/bna/bfa_ioc.h | |||
| @@ -194,6 +194,7 @@ struct bfa_ioc_hwif { | |||
| 194 | bool msix); | 194 | bool msix); |
| 195 | void (*ioc_notify_fail) (struct bfa_ioc *ioc); | 195 | void (*ioc_notify_fail) (struct bfa_ioc *ioc); |
| 196 | void (*ioc_ownership_reset) (struct bfa_ioc *ioc); | 196 | void (*ioc_ownership_reset) (struct bfa_ioc *ioc); |
| 197 | bool (*ioc_sync_start) (struct bfa_ioc *ioc); | ||
| 197 | void (*ioc_sync_join) (struct bfa_ioc *ioc); | 198 | void (*ioc_sync_join) (struct bfa_ioc *ioc); |
| 198 | void (*ioc_sync_leave) (struct bfa_ioc *ioc); | 199 | void (*ioc_sync_leave) (struct bfa_ioc *ioc); |
| 199 | void (*ioc_sync_ack) (struct bfa_ioc *ioc); | 200 | void (*ioc_sync_ack) (struct bfa_ioc *ioc); |
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c index 469997c4ffd1..87aecdf22cf9 100644 --- a/drivers/net/bna/bfa_ioc_ct.c +++ b/drivers/net/bna/bfa_ioc_ct.c | |||
| @@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); | |||
| 41 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); | 41 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); |
| 42 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); | 42 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); |
| 43 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); | 43 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); |
| 44 | static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); | ||
| 44 | static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); | 45 | static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); |
| 45 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); | 46 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); |
| 46 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); | 47 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); |
| @@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) | |||
| 63 | nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; | 64 | nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; |
| 64 | nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; | 65 | nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; |
| 65 | nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; | 66 | nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; |
| 67 | nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start; | ||
| 66 | nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; | 68 | nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; |
| 67 | nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; | 69 | nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; |
| 68 | nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; | 70 | nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; |
| @@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) | |||
| 345 | /** | 347 | /** |
| 346 | * Synchronized IOC failure processing routines | 348 | * Synchronized IOC failure processing routines |
| 347 | */ | 349 | */ |
| 350 | static bool | ||
| 351 | bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) | ||
| 352 | { | ||
| 353 | u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
| 354 | u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); | ||
| 355 | |||
| 356 | /* | ||
| 357 | * Driver load time. If the sync required bit for this PCI fn | ||
| 358 | * is set, it is due to an unclean exit by the driver for this | ||
| 359 | * PCI fn in the previous incarnation. Whoever comes here first | ||
| 360 | * should clean it up, no matter which PCI fn. | ||
| 361 | */ | ||
| 362 | |||
| 363 | if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { | ||
| 364 | writel(0, ioc->ioc_regs.ioc_fail_sync); | ||
| 365 | writel(1, ioc->ioc_regs.ioc_usage_reg); | ||
| 366 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); | ||
| 367 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); | ||
| 368 | return true; | ||
| 369 | } | ||
| 370 | |||
| 371 | return bfa_ioc_ct_sync_complete(ioc); | ||
| 372 | } | ||
| 373 | /** | ||
| 374 | * Synchronized IOC failure processing routines | ||
| 375 | */ | ||
| 348 | static void | 376 | static void |
| 349 | bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) | 377 | bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) |
| 350 | { | 378 | { |
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h index a97396811050..6050379526f7 100644 --- a/drivers/net/bna/bfi.h +++ b/drivers/net/bna/bfi.h | |||
| @@ -184,12 +184,14 @@ enum bfi_mclass { | |||
| 184 | #define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ | 184 | #define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ |
| 185 | 185 | ||
| 186 | #define BFI_BOOT_TYPE_OFF 8 | 186 | #define BFI_BOOT_TYPE_OFF 8 |
| 187 | #define BFI_BOOT_PARAM_OFF 12 | 187 | #define BFI_BOOT_LOADER_OFF 12 |
| 188 | 188 | ||
| 189 | #define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */ | 189 | #define BFI_BOOT_TYPE_NORMAL 0 |
| 190 | #define BFI_BOOT_TYPE_FLASH 1 | 190 | #define BFI_BOOT_TYPE_FLASH 1 |
| 191 | #define BFI_BOOT_TYPE_MEMTEST 2 | 191 | #define BFI_BOOT_TYPE_MEMTEST 2 |
| 192 | 192 | ||
| 193 | #define BFI_BOOT_LOADER_OS 0 | ||
| 194 | |||
| 193 | #define BFI_BOOT_MEMTEST_RES_ADDR 0x900 | 195 | #define BFI_BOOT_MEMTEST_RES_ADDR 0x900 |
| 194 | #define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 | 196 | #define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 |
| 195 | 197 | ||
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c index 9f356d5d0f33..8e6ceab9f4d8 100644 --- a/drivers/net/bna/bnad.c +++ b/drivers/net/bna/bnad.c | |||
| @@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id) | |||
| 1837 | /* Initialize the Rx event handlers */ | 1837 | /* Initialize the Rx event handlers */ |
| 1838 | rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; | 1838 | rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; |
| 1839 | rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; | 1839 | rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; |
| 1840 | rx_cbfn.rcb_destroy_cbfn = NULL; | ||
| 1841 | rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; | 1840 | rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; |
| 1842 | rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; | 1841 | rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; |
| 1843 | rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup; | 1842 | rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup; |
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index f5050155c6b5..89cb977898cb 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
| @@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) | |||
| 2114 | for (i = 0; i < (data * 2); i++) { | 2114 | for (i = 0; i < (data * 2); i++) { |
| 2115 | if ((i % 2) == 0) | 2115 | if ((i % 2) == 0) |
| 2116 | bnx2x_set_led(&bp->link_params, &bp->link_vars, | 2116 | bnx2x_set_led(&bp->link_params, &bp->link_vars, |
| 2117 | LED_MODE_OPER, SPEED_1000); | 2117 | LED_MODE_ON, SPEED_1000); |
| 2118 | else | 2118 | else |
| 2119 | bnx2x_set_led(&bp->link_params, &bp->link_vars, | 2119 | bnx2x_set_led(&bp->link_params, &bp->link_vars, |
| 2120 | LED_MODE_OFF, 0); | 2120 | LED_MODE_FRONT_PANEL_OFF, 0); |
| 2121 | 2121 | ||
| 2122 | msleep_interruptible(500); | 2122 | msleep_interruptible(500); |
| 2123 | if (signal_pending(current)) | 2123 | if (signal_pending(current)) |
| 2124 | break; | 2124 | break; |
| 2125 | } | 2125 | } |
| 2126 | 2126 | ||
| 2127 | if (bp->link_vars.link_up) | 2127 | bnx2x_set_led(&bp->link_params, &bp->link_vars, |
| 2128 | bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, | 2128 | LED_MODE_OPER, bp->link_vars.line_speed); |
| 2129 | bp->link_vars.line_speed); | ||
| 2130 | 2129 | ||
| 2131 | return 0; | 2130 | return 0; |
| 2132 | } | 2131 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9bc5de3e04a8..ba715826e2a8 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
| @@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond) | |||
| 176 | bond_info->tx_hashtbl = new_hashtbl; | 176 | bond_info->tx_hashtbl = new_hashtbl; |
| 177 | 177 | ||
| 178 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { | 178 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { |
| 179 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); | 179 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | _unlock_tx_hashtbl(bond); | 182 | _unlock_tx_hashtbl(bond); |
| @@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) | |||
| 701 | */ | 701 | */ |
| 702 | rlb_choose_channel(skb, bond); | 702 | rlb_choose_channel(skb, bond); |
| 703 | 703 | ||
| 704 | /* The ARP relpy packets must be delayed so that | 704 | /* The ARP reply packets must be delayed so that |
| 705 | * they can cancel out the influence of the ARP request. | 705 | * they can cancel out the influence of the ARP request. |
| 706 | */ | 706 | */ |
| 707 | bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; | 707 | bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; |
| @@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla | |||
| 1042 | * | 1042 | * |
| 1043 | * If the permanent hw address of @slave is @bond's hw address, we need to | 1043 | * If the permanent hw address of @slave is @bond's hw address, we need to |
| 1044 | * find a different hw address to give @slave, that isn't in use by any other | 1044 | * find a different hw address to give @slave, that isn't in use by any other |
| 1045 | * slave in the bond. This address must be, of course, one of the premanent | 1045 | * slave in the bond. This address must be, of course, one of the permanent |
| 1046 | * addresses of the other slaves. | 1046 | * addresses of the other slaves. |
| 1047 | * | 1047 | * |
| 1048 | * We go over the slave list, and for each slave there we compare its | 1048 | * We go over the slave list, and for each slave there we compare its |
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index 86861f08b24d..8ca7158b2dda 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h | |||
| @@ -75,7 +75,7 @@ struct tlb_client_info { | |||
| 75 | * gave this entry index. | 75 | * gave this entry index. |
| 76 | */ | 76 | */ |
| 77 | u32 tx_bytes; /* Each Client accumulates the BytesTx that | 77 | u32 tx_bytes; /* Each Client accumulates the BytesTx that |
| 78 | * were tranmitted to it, and after each | 78 | * were transmitted to it, and after each |
| 79 | * CallBack the LoadHistory is divided | 79 | * CallBack the LoadHistory is divided |
| 80 | * by the balance interval | 80 | * by the balance interval |
| 81 | */ | 81 | */ |
| @@ -122,7 +122,6 @@ struct tlb_slave_info { | |||
| 122 | }; | 122 | }; |
| 123 | 123 | ||
| 124 | struct alb_bond_info { | 124 | struct alb_bond_info { |
| 125 | struct timer_list alb_timer; | ||
| 126 | struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ | 125 | struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ |
| 127 | spinlock_t tx_hashtbl_lock; | 126 | spinlock_t tx_hashtbl_lock; |
| 128 | u32 unbalanced_load; | 127 | u32 unbalanced_load; |
| @@ -140,7 +139,6 @@ struct alb_bond_info { | |||
| 140 | struct slave *next_rx_slave;/* next slave to be assigned | 139 | struct slave *next_rx_slave;/* next slave to be assigned |
| 141 | * to a new rx client for | 140 | * to a new rx client for |
| 142 | */ | 141 | */ |
| 143 | u32 rlb_interval_counter; | ||
| 144 | u8 primary_is_promisc; /* boolean */ | 142 | u8 primary_is_promisc; /* boolean */ |
| 145 | u32 rlb_promisc_timeout_counter;/* counts primary | 143 | u32 rlb_promisc_timeout_counter;/* counts primary |
| 146 | * promiscuity time | 144 | * promiscuity time |
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index c0a1bc5b1435..bd1d811c204f 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c | |||
| @@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) | |||
| 260 | 260 | ||
| 261 | if (!ofdev->dev.of_match) | 261 | if (!ofdev->dev.of_match) |
| 262 | return -EINVAL; | 262 | return -EINVAL; |
| 263 | data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data; | 263 | data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; |
| 264 | 264 | ||
| 265 | base = of_iomap(np, 0); | 265 | base = of_iomap(np, 0); |
| 266 | if (!base) { | 266 | if (!base) { |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index ea0dc451da9c..d70fb76edb77 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
| @@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev) | |||
| 173 | | NETIF_F_RXCSUM | 173 | | NETIF_F_RXCSUM |
| 174 | | NETIF_F_HIGHDMA | 174 | | NETIF_F_HIGHDMA |
| 175 | | NETIF_F_LLTX | 175 | | NETIF_F_LLTX |
| 176 | | NETIF_F_NETNS_LOCAL; | 176 | | NETIF_F_NETNS_LOCAL |
| 177 | | NETIF_F_VLAN_CHALLENGED; | ||
| 177 | dev->ethtool_ops = &loopback_ethtool_ops; | 178 | dev->ethtool_ops = &loopback_ethtool_ops; |
| 178 | dev->header_ops = ð_header_ops; | 179 | dev->header_ops = ð_header_ops; |
| 179 | dev->netdev_ops = &loopback_ops; | 180 | dev->netdev_ops = &loopback_ops; |
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index aa2813e06d00..1074231f0a0d 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
| @@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, | |||
| 860 | prev_eedata = eedata; | 860 | prev_eedata = eedata; |
| 861 | } | 861 | } |
| 862 | 862 | ||
| 863 | /* Store MAC Address in perm_addr */ | ||
| 864 | memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); | ||
| 865 | |||
| 863 | dev->base_addr = (unsigned long __force) ioaddr; | 866 | dev->base_addr = (unsigned long __force) ioaddr; |
| 864 | dev->irq = irq; | 867 | dev->irq = irq; |
| 865 | 868 | ||
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index d7299f1a4940..679dc8519c5b 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
| @@ -174,7 +174,7 @@ | |||
| 174 | 174 | ||
| 175 | #define MAX_NUM_CARDS 4 | 175 | #define MAX_NUM_CARDS 4 |
| 176 | 176 | ||
| 177 | #define MAX_BUFFERS_PER_CMD 32 | 177 | #define NETXEN_MAX_FRAGS_PER_TX 14 |
| 178 | #define MAX_TSO_HEADER_DESC 2 | 178 | #define MAX_TSO_HEADER_DESC 2 |
| 179 | #define MGMT_CMD_DESC_RESV 4 | 179 | #define MGMT_CMD_DESC_RESV 4 |
| 180 | #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ | 180 | #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ |
| @@ -558,7 +558,7 @@ struct netxen_recv_crb { | |||
| 558 | */ | 558 | */ |
| 559 | struct netxen_cmd_buffer { | 559 | struct netxen_cmd_buffer { |
| 560 | struct sk_buff *skb; | 560 | struct sk_buff *skb; |
| 561 | struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; | 561 | struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; |
| 562 | u32 frag_count; | 562 | u32 frag_count; |
| 563 | }; | 563 | }; |
| 564 | 564 | ||
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 83348dc4b184..e8a4b6655999 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
| @@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 1844 | struct cmd_desc_type0 *hwdesc, *first_desc; | 1844 | struct cmd_desc_type0 *hwdesc, *first_desc; |
| 1845 | struct pci_dev *pdev; | 1845 | struct pci_dev *pdev; |
| 1846 | int i, k; | 1846 | int i, k; |
| 1847 | int delta = 0; | ||
| 1848 | struct skb_frag_struct *frag; | ||
| 1847 | 1849 | ||
| 1848 | u32 producer; | 1850 | u32 producer; |
| 1849 | int frag_count, no_of_desc; | 1851 | int frag_count, no_of_desc; |
| @@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 1851 | 1853 | ||
| 1852 | frag_count = skb_shinfo(skb)->nr_frags + 1; | 1854 | frag_count = skb_shinfo(skb)->nr_frags + 1; |
| 1853 | 1855 | ||
| 1856 | /* 14 frags supported for normal packet and | ||
| 1857 | * 32 frags supported for TSO packet | ||
| 1858 | */ | ||
| 1859 | if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { | ||
| 1860 | |||
| 1861 | for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { | ||
| 1862 | frag = &skb_shinfo(skb)->frags[i]; | ||
| 1863 | delta += frag->size; | ||
| 1864 | } | ||
| 1865 | |||
| 1866 | if (!__pskb_pull_tail(skb, delta)) | ||
| 1867 | goto drop_packet; | ||
| 1868 | |||
| 1869 | frag_count = 1 + skb_shinfo(skb)->nr_frags; | ||
| 1870 | } | ||
| 1854 | /* 4 fragments per cmd des */ | 1871 | /* 4 fragments per cmd des */ |
| 1855 | no_of_desc = (frag_count + 3) >> 2; | 1872 | no_of_desc = (frag_count + 3) >> 2; |
| 1856 | 1873 | ||
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index dc44564ef6f9..b0dead00b2d1 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h | |||
| @@ -99,6 +99,7 @@ | |||
| 99 | #define TX_UDPV6_PKT 0x0c | 99 | #define TX_UDPV6_PKT 0x0c |
| 100 | 100 | ||
| 101 | /* Tx defines */ | 101 | /* Tx defines */ |
| 102 | #define QLCNIC_MAX_FRAGS_PER_TX 14 | ||
| 102 | #define MAX_TSO_HEADER_DESC 2 | 103 | #define MAX_TSO_HEADER_DESC 2 |
| 103 | #define MGMT_CMD_DESC_RESV 4 | 104 | #define MGMT_CMD_DESC_RESV 4 |
| 104 | #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ | 105 | #define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ |
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index cd88c7e1bfa9..cb1a1ef36c0a 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
| @@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2099 | struct cmd_desc_type0 *hwdesc, *first_desc; | 2099 | struct cmd_desc_type0 *hwdesc, *first_desc; |
| 2100 | struct pci_dev *pdev; | 2100 | struct pci_dev *pdev; |
| 2101 | struct ethhdr *phdr; | 2101 | struct ethhdr *phdr; |
| 2102 | int delta = 0; | ||
| 2102 | int i, k; | 2103 | int i, k; |
| 2103 | 2104 | ||
| 2104 | u32 producer; | 2105 | u32 producer; |
| @@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 2118 | } | 2119 | } |
| 2119 | 2120 | ||
| 2120 | frag_count = skb_shinfo(skb)->nr_frags + 1; | 2121 | frag_count = skb_shinfo(skb)->nr_frags + 1; |
| 2122 | /* 14 frags supported for normal packet and | ||
| 2123 | * 32 frags supported for TSO packet | ||
| 2124 | */ | ||
| 2125 | if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { | ||
| 2126 | |||
| 2127 | for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) | ||
| 2128 | delta += skb_shinfo(skb)->frags[i].size; | ||
| 2129 | |||
| 2130 | if (!__pskb_pull_tail(skb, delta)) | ||
| 2131 | goto drop_packet; | ||
| 2132 | |||
| 2133 | frag_count = 1 + skb_shinfo(skb)->nr_frags; | ||
| 2134 | } | ||
| 2121 | 2135 | ||
| 2122 | /* 4 fragments per cmd des */ | 2136 | /* 4 fragments per cmd des */ |
| 2123 | no_of_desc = (frag_count + 3) >> 2; | 2137 | no_of_desc = (frag_count + 3) >> 2; |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index d890679e4c4d..a3c2aab53de8 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
| @@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget) | |||
| 328 | * processing to finish, then directly poll (and ack ) the eventq. | 328 | * processing to finish, then directly poll (and ack ) the eventq. |
| 329 | * Finally reenable NAPI and interrupts. | 329 | * Finally reenable NAPI and interrupts. |
| 330 | * | 330 | * |
| 331 | * Since we are touching interrupts the caller should hold the suspend lock | 331 | * This is for use only during a loopback self-test. It must not |
| 332 | * deliver any packets up the stack as this can result in deadlock. | ||
| 332 | */ | 333 | */ |
| 333 | void efx_process_channel_now(struct efx_channel *channel) | 334 | void efx_process_channel_now(struct efx_channel *channel) |
| 334 | { | 335 | { |
| @@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
| 336 | 337 | ||
| 337 | BUG_ON(channel->channel >= efx->n_channels); | 338 | BUG_ON(channel->channel >= efx->n_channels); |
| 338 | BUG_ON(!channel->enabled); | 339 | BUG_ON(!channel->enabled); |
| 340 | BUG_ON(!efx->loopback_selftest); | ||
| 339 | 341 | ||
| 340 | /* Disable interrupts and wait for ISRs to complete */ | 342 | /* Disable interrupts and wait for ISRs to complete */ |
| 341 | efx_nic_disable_interrupts(efx); | 343 | efx_nic_disable_interrupts(efx); |
| @@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx) | |||
| 1436 | * restart the transmit interface early so the watchdog timer stops */ | 1438 | * restart the transmit interface early so the watchdog timer stops */ |
| 1437 | efx_start_port(efx); | 1439 | efx_start_port(efx); |
| 1438 | 1440 | ||
| 1439 | if (efx_dev_registered(efx)) | 1441 | if (efx_dev_registered(efx) && !efx->port_inhibited) |
| 1440 | netif_tx_wake_all_queues(efx->net_dev); | 1442 | netif_tx_wake_all_queues(efx->net_dev); |
| 1441 | 1443 | ||
| 1442 | efx_for_each_channel(channel, efx) | 1444 | efx_for_each_channel(channel, efx) |
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index d9d8c2ef1074..cc978803d484 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h | |||
| @@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | |||
| 152 | 152 | ||
| 153 | spin_lock_irqsave(&efx->biu_lock, flags); | 153 | spin_lock_irqsave(&efx->biu_lock, flags); |
| 154 | value->u32[0] = _efx_readd(efx, reg + 0); | 154 | value->u32[0] = _efx_readd(efx, reg + 0); |
| 155 | rmb(); | ||
| 155 | value->u32[1] = _efx_readd(efx, reg + 4); | 156 | value->u32[1] = _efx_readd(efx, reg + 4); |
| 156 | value->u32[2] = _efx_readd(efx, reg + 8); | 157 | value->u32[2] = _efx_readd(efx, reg + 8); |
| 157 | value->u32[3] = _efx_readd(efx, reg + 12); | 158 | value->u32[3] = _efx_readd(efx, reg + 12); |
| @@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | |||
| 174 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | 175 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); |
| 175 | #else | 176 | #else |
| 176 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | 177 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); |
| 178 | rmb(); | ||
| 177 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | 179 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); |
| 178 | #endif | 180 | #endif |
| 179 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 181 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 9ffa9a6b55a0..191a311da2dc 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
| @@ -330,7 +330,6 @@ enum efx_rx_alloc_method { | |||
| 330 | * @eventq_mask: Event queue pointer mask | 330 | * @eventq_mask: Event queue pointer mask |
| 331 | * @eventq_read_ptr: Event queue read pointer | 331 | * @eventq_read_ptr: Event queue read pointer |
| 332 | * @last_eventq_read_ptr: Last event queue read pointer value. | 332 | * @last_eventq_read_ptr: Last event queue read pointer value. |
| 333 | * @magic_count: Event queue test event count | ||
| 334 | * @irq_count: Number of IRQs since last adaptive moderation decision | 333 | * @irq_count: Number of IRQs since last adaptive moderation decision |
| 335 | * @irq_mod_score: IRQ moderation score | 334 | * @irq_mod_score: IRQ moderation score |
| 336 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 335 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
| @@ -360,7 +359,6 @@ struct efx_channel { | |||
| 360 | unsigned int eventq_mask; | 359 | unsigned int eventq_mask; |
| 361 | unsigned int eventq_read_ptr; | 360 | unsigned int eventq_read_ptr; |
| 362 | unsigned int last_eventq_read_ptr; | 361 | unsigned int last_eventq_read_ptr; |
| 363 | unsigned int magic_count; | ||
| 364 | 362 | ||
| 365 | unsigned int irq_count; | 363 | unsigned int irq_count; |
| 366 | unsigned int irq_mod_score; | 364 | unsigned int irq_mod_score; |
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index e8396614daf3..10f1cb79c147 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c | |||
| @@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | |||
| 84 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | 84 | static inline efx_qword_t *efx_event(struct efx_channel *channel, |
| 85 | unsigned int index) | 85 | unsigned int index) |
| 86 | { | 86 | { |
| 87 | return ((efx_qword_t *) (channel->eventq.addr)) + index; | 87 | return ((efx_qword_t *) (channel->eventq.addr)) + |
| 88 | (index & channel->eventq_mask); | ||
| 88 | } | 89 | } |
| 89 | 90 | ||
| 90 | /* See if an event is present | 91 | /* See if an event is present |
| @@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel) | |||
| 673 | efx_dword_t reg; | 674 | efx_dword_t reg; |
| 674 | struct efx_nic *efx = channel->efx; | 675 | struct efx_nic *efx = channel->efx; |
| 675 | 676 | ||
| 676 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); | 677 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, |
| 678 | channel->eventq_read_ptr & channel->eventq_mask); | ||
| 677 | efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, | 679 | efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, |
| 678 | channel->channel); | 680 | channel->channel); |
| 679 | } | 681 | } |
| @@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | |||
| 908 | 910 | ||
| 909 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | 911 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); |
| 910 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) | 912 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) |
| 911 | ++channel->magic_count; | 913 | ; /* ignore */ |
| 912 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) | 914 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) |
| 913 | /* The queue must be empty, so we won't receive any rx | 915 | /* The queue must be empty, so we won't receive any rx |
| 914 | * events, so efx_process_channel() won't refill the | 916 | * events, so efx_process_channel() won't refill the |
| @@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget) | |||
| 1015 | /* Clear this event by marking it all ones */ | 1017 | /* Clear this event by marking it all ones */ |
| 1016 | EFX_SET_QWORD(*p_event); | 1018 | EFX_SET_QWORD(*p_event); |
| 1017 | 1019 | ||
| 1018 | /* Increment read pointer */ | 1020 | ++read_ptr; |
| 1019 | read_ptr = (read_ptr + 1) & channel->eventq_mask; | ||
| 1020 | 1021 | ||
| 1021 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | 1022 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
| 1022 | 1023 | ||
| @@ -1060,6 +1061,13 @@ out: | |||
| 1060 | return spent; | 1061 | return spent; |
| 1061 | } | 1062 | } |
| 1062 | 1063 | ||
| 1064 | /* Check whether an event is present in the eventq at the current | ||
| 1065 | * read pointer. Only useful for self-test. | ||
| 1066 | */ | ||
| 1067 | bool efx_nic_event_present(struct efx_channel *channel) | ||
| 1068 | { | ||
| 1069 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); | ||
| 1070 | } | ||
| 1063 | 1071 | ||
| 1064 | /* Allocate buffer table entries for event queue */ | 1072 | /* Allocate buffer table entries for event queue */ |
| 1065 | int efx_nic_probe_eventq(struct efx_channel *channel) | 1073 | int efx_nic_probe_eventq(struct efx_channel *channel) |
| @@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx) | |||
| 1165 | struct efx_tx_queue *tx_queue; | 1173 | struct efx_tx_queue *tx_queue; |
| 1166 | struct efx_rx_queue *rx_queue; | 1174 | struct efx_rx_queue *rx_queue; |
| 1167 | unsigned int read_ptr = channel->eventq_read_ptr; | 1175 | unsigned int read_ptr = channel->eventq_read_ptr; |
| 1168 | unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; | 1176 | unsigned int end_ptr = read_ptr + channel->eventq_mask - 1; |
| 1169 | 1177 | ||
| 1170 | do { | 1178 | do { |
| 1171 | efx_qword_t *event = efx_event(channel, read_ptr); | 1179 | efx_qword_t *event = efx_event(channel, read_ptr); |
| @@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx) | |||
| 1205 | * it's ok to throw away every non-flush event */ | 1213 | * it's ok to throw away every non-flush event */ |
| 1206 | EFX_SET_QWORD(*event); | 1214 | EFX_SET_QWORD(*event); |
| 1207 | 1215 | ||
| 1208 | read_ptr = (read_ptr + 1) & channel->eventq_mask; | 1216 | ++read_ptr; |
| 1209 | } while (read_ptr != end_ptr); | 1217 | } while (read_ptr != end_ptr); |
| 1210 | 1218 | ||
| 1211 | channel->eventq_read_ptr = read_ptr; | 1219 | channel->eventq_read_ptr = read_ptr; |
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index d9de1b647d41..a42db6e35be3 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h | |||
| @@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel); | |||
| 184 | extern void efx_nic_remove_eventq(struct efx_channel *channel); | 184 | extern void efx_nic_remove_eventq(struct efx_channel *channel); |
| 185 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); | 185 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); |
| 186 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); | 186 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); |
| 187 | extern bool efx_nic_event_present(struct efx_channel *channel); | ||
| 187 | 188 | ||
| 188 | /* MAC/PHY */ | 189 | /* MAC/PHY */ |
| 189 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | 190 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index a0f49b348d62..50ad3bcaf68a 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
| @@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | |||
| 131 | static int efx_test_interrupts(struct efx_nic *efx, | 131 | static int efx_test_interrupts(struct efx_nic *efx, |
| 132 | struct efx_self_tests *tests) | 132 | struct efx_self_tests *tests) |
| 133 | { | 133 | { |
| 134 | struct efx_channel *channel; | ||
| 135 | |||
| 136 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); | 134 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); |
| 137 | tests->interrupt = -1; | 135 | tests->interrupt = -1; |
| 138 | 136 | ||
| @@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
| 140 | efx->last_irq_cpu = -1; | 138 | efx->last_irq_cpu = -1; |
| 141 | smp_wmb(); | 139 | smp_wmb(); |
| 142 | 140 | ||
| 143 | /* ACK each interrupting event queue. Receiving an interrupt due to | ||
| 144 | * traffic before a test event is raised is considered a pass */ | ||
| 145 | efx_for_each_channel(channel, efx) { | ||
| 146 | if (channel->work_pending) | ||
| 147 | efx_process_channel_now(channel); | ||
| 148 | if (efx->last_irq_cpu >= 0) | ||
| 149 | goto success; | ||
| 150 | } | ||
| 151 | |||
| 152 | efx_nic_generate_interrupt(efx); | 141 | efx_nic_generate_interrupt(efx); |
| 153 | 142 | ||
| 154 | /* Wait for arrival of test interrupt. */ | 143 | /* Wait for arrival of test interrupt. */ |
| @@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
| 173 | struct efx_self_tests *tests) | 162 | struct efx_self_tests *tests) |
| 174 | { | 163 | { |
| 175 | struct efx_nic *efx = channel->efx; | 164 | struct efx_nic *efx = channel->efx; |
| 176 | unsigned int magic_count, count; | 165 | unsigned int read_ptr, count; |
| 177 | 166 | ||
| 178 | tests->eventq_dma[channel->channel] = -1; | 167 | tests->eventq_dma[channel->channel] = -1; |
| 179 | tests->eventq_int[channel->channel] = -1; | 168 | tests->eventq_int[channel->channel] = -1; |
| 180 | tests->eventq_poll[channel->channel] = -1; | 169 | tests->eventq_poll[channel->channel] = -1; |
| 181 | 170 | ||
| 182 | magic_count = channel->magic_count; | 171 | read_ptr = channel->eventq_read_ptr; |
| 183 | channel->efx->last_irq_cpu = -1; | 172 | channel->efx->last_irq_cpu = -1; |
| 184 | smp_wmb(); | 173 | smp_wmb(); |
| 185 | 174 | ||
| @@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
| 190 | do { | 179 | do { |
| 191 | schedule_timeout_uninterruptible(HZ / 100); | 180 | schedule_timeout_uninterruptible(HZ / 100); |
| 192 | 181 | ||
| 193 | if (channel->work_pending) | 182 | if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr) |
| 194 | efx_process_channel_now(channel); | ||
| 195 | |||
| 196 | if (channel->magic_count != magic_count) | ||
| 197 | goto eventq_ok; | 183 | goto eventq_ok; |
| 198 | } while (++count < 2); | 184 | } while (++count < 2); |
| 199 | 185 | ||
| @@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
| 211 | } | 197 | } |
| 212 | 198 | ||
| 213 | /* Check to see if event was received even if interrupt wasn't */ | 199 | /* Check to see if event was received even if interrupt wasn't */ |
| 214 | efx_process_channel_now(channel); | 200 | if (efx_nic_event_present(channel)) { |
| 215 | if (channel->magic_count != magic_count) { | ||
| 216 | netif_err(efx, drv, efx->net_dev, | 201 | netif_err(efx, drv, efx->net_dev, |
| 217 | "channel %d event was generated, but " | 202 | "channel %d event was generated, but " |
| 218 | "failed to trigger an interrupt\n", channel->channel); | 203 | "failed to trigger an interrupt\n", channel->channel); |
| @@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | |||
| 770 | __efx_reconfigure_port(efx); | 755 | __efx_reconfigure_port(efx); |
| 771 | mutex_unlock(&efx->mac_lock); | 756 | mutex_unlock(&efx->mac_lock); |
| 772 | 757 | ||
| 758 | netif_tx_wake_all_queues(efx->net_dev); | ||
| 759 | |||
| 773 | return rc_test; | 760 | return rc_test; |
| 774 | } | 761 | } |
| 775 | 762 | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 139801908217..d2c85dfdf3bf 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
| @@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
| 435 | * queue state. */ | 435 | * queue state. */ |
| 436 | smp_mb(); | 436 | smp_mb(); |
| 437 | if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && | 437 | if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && |
| 438 | likely(efx->port_enabled)) { | 438 | likely(efx->port_enabled) && |
| 439 | likely(!efx->port_inhibited)) { | ||
| 439 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 440 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
| 440 | if (fill_level < EFX_TXQ_THRESHOLD(efx)) { | 441 | if (fill_level < EFX_TXQ_THRESHOLD(efx)) { |
| 441 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); | 442 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index cb317cd069ff..484f795a779d 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
| @@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops; | |||
| 240 | * @net_dev: the net device to get address for | 240 | * @net_dev: the net device to get address for |
| 241 | * | 241 | * |
| 242 | * Older SiS900 and friends, use EEPROM to store MAC address. | 242 | * Older SiS900 and friends, use EEPROM to store MAC address. |
| 243 | * MAC address is read from read_eeprom() into @net_dev->dev_addr. | 243 | * MAC address is read from read_eeprom() into @net_dev->dev_addr and |
| 244 | * @net_dev->perm_addr. | ||
| 244 | */ | 245 | */ |
| 245 | 246 | ||
| 246 | static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) | 247 | static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) |
| @@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de | |||
| 261 | for (i = 0; i < 3; i++) | 262 | for (i = 0; i < 3; i++) |
| 262 | ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); | 263 | ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); |
| 263 | 264 | ||
| 265 | /* Store MAC Address in perm_addr */ | ||
| 266 | memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); | ||
| 267 | |||
| 264 | return 1; | 268 | return 1; |
| 265 | } | 269 | } |
| 266 | 270 | ||
| @@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de | |||
| 271 | * | 275 | * |
| 272 | * SiS630E model, use APC CMOS RAM to store MAC address. | 276 | * SiS630E model, use APC CMOS RAM to store MAC address. |
| 273 | * APC CMOS RAM is accessed through ISA bridge. | 277 | * APC CMOS RAM is accessed through ISA bridge. |
| 274 | * MAC address is read into @net_dev->dev_addr. | 278 | * MAC address is read into @net_dev->dev_addr and |
| 279 | * @net_dev->perm_addr. | ||
| 275 | */ | 280 | */ |
| 276 | 281 | ||
| 277 | static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, | 282 | static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, |
| @@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, | |||
| 296 | outb(0x09 + i, 0x70); | 301 | outb(0x09 + i, 0x70); |
| 297 | ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); | 302 | ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); |
| 298 | } | 303 | } |
| 304 | |||
| 305 | /* Store MAC Address in perm_addr */ | ||
| 306 | memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); | ||
| 307 | |||
| 299 | pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); | 308 | pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); |
| 300 | pci_dev_put(isa_bridge); | 309 | pci_dev_put(isa_bridge); |
| 301 | 310 | ||
| @@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, | |||
| 310 | * | 319 | * |
| 311 | * SiS635 model, set MAC Reload Bit to load Mac address from APC | 320 | * SiS635 model, set MAC Reload Bit to load Mac address from APC |
| 312 | * to rfdr. rfdr is accessed through rfcr. MAC address is read into | 321 | * to rfdr. rfdr is accessed through rfcr. MAC address is read into |
| 313 | * @net_dev->dev_addr. | 322 | * @net_dev->dev_addr and @net_dev->perm_addr. |
| 314 | */ | 323 | */ |
| 315 | 324 | ||
| 316 | static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, | 325 | static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, |
| @@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, | |||
| 334 | *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); | 343 | *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); |
| 335 | } | 344 | } |
| 336 | 345 | ||
| 346 | /* Store MAC Address in perm_addr */ | ||
| 347 | memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); | ||
| 348 | |||
| 337 | /* enable packet filtering */ | 349 | /* enable packet filtering */ |
| 338 | outl(rfcrSave | RFEN, rfcr + ioaddr); | 350 | outl(rfcrSave | RFEN, rfcr + ioaddr); |
| 339 | 351 | ||
| @@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, | |||
| 353 | * EEDONE signal to refuse EEPROM access by LAN. | 365 | * EEDONE signal to refuse EEPROM access by LAN. |
| 354 | * The EEPROM map of SiS962 or SiS963 is different to SiS900. | 366 | * The EEPROM map of SiS962 or SiS963 is different to SiS900. |
| 355 | * The signature field in SiS962 or SiS963 spec is meaningless. | 367 | * The signature field in SiS962 or SiS963 spec is meaningless. |
| 356 | * MAC address is read into @net_dev->dev_addr. | 368 | * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr. |
| 357 | */ | 369 | */ |
| 358 | 370 | ||
| 359 | static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, | 371 | static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, |
| @@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, | |||
| 372 | for (i = 0; i < 3; i++) | 384 | for (i = 0; i < 3; i++) |
| 373 | ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); | 385 | ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); |
| 374 | 386 | ||
| 387 | /* Store MAC Address in perm_addr */ | ||
| 388 | memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); | ||
| 389 | |||
| 375 | outl(EEDONE, ee_addr); | 390 | outl(EEDONE, ee_addr); |
| 376 | return 1; | 391 | return 1; |
| 377 | } else { | 392 | } else { |
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c index d65fab1ba790..e25093510b0c 100644 --- a/drivers/net/stmmac/dwmac_lib.c +++ b/drivers/net/stmmac/dwmac_lib.c | |||
| @@ -26,9 +26,9 @@ | |||
| 26 | 26 | ||
| 27 | #undef DWMAC_DMA_DEBUG | 27 | #undef DWMAC_DMA_DEBUG |
| 28 | #ifdef DWMAC_DMA_DEBUG | 28 | #ifdef DWMAC_DMA_DEBUG |
| 29 | #define DBG(fmt, args...) printk(fmt, ## args) | 29 | #define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args) |
| 30 | #else | 30 | #else |
| 31 | #define DBG(fmt, args...) do { } while (0) | 31 | #define DWMAC_LIB_DBG(fmt, args...) do { } while (0) |
| 32 | #endif | 32 | #endif |
| 33 | 33 | ||
| 34 | /* CSR1 enables the transmit DMA to check for new descriptor */ | 34 | /* CSR1 enables the transmit DMA to check for new descriptor */ |
| @@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, | |||
| 152 | /* read the status register (CSR5) */ | 152 | /* read the status register (CSR5) */ |
| 153 | u32 intr_status = readl(ioaddr + DMA_STATUS); | 153 | u32 intr_status = readl(ioaddr + DMA_STATUS); |
| 154 | 154 | ||
| 155 | DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); | 155 | DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status); |
| 156 | #ifdef DWMAC_DMA_DEBUG | 156 | #ifdef DWMAC_DMA_DEBUG |
| 157 | /* It displays the DMA process states (CSR5 register) */ | 157 | /* It displays the DMA process states (CSR5 register) */ |
| 158 | show_tx_process_state(intr_status); | 158 | show_tx_process_state(intr_status); |
| @@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, | |||
| 160 | #endif | 160 | #endif |
| 161 | /* ABNORMAL interrupts */ | 161 | /* ABNORMAL interrupts */ |
| 162 | if (unlikely(intr_status & DMA_STATUS_AIS)) { | 162 | if (unlikely(intr_status & DMA_STATUS_AIS)) { |
| 163 | DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: "); | 163 | DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: "); |
| 164 | if (unlikely(intr_status & DMA_STATUS_UNF)) { | 164 | if (unlikely(intr_status & DMA_STATUS_UNF)) { |
| 165 | DBG(INFO, "transmit underflow\n"); | 165 | DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n"); |
| 166 | ret = tx_hard_error_bump_tc; | 166 | ret = tx_hard_error_bump_tc; |
| 167 | x->tx_undeflow_irq++; | 167 | x->tx_undeflow_irq++; |
| 168 | } | 168 | } |
| 169 | if (unlikely(intr_status & DMA_STATUS_TJT)) { | 169 | if (unlikely(intr_status & DMA_STATUS_TJT)) { |
| 170 | DBG(INFO, "transmit jabber\n"); | 170 | DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n"); |
| 171 | x->tx_jabber_irq++; | 171 | x->tx_jabber_irq++; |
| 172 | } | 172 | } |
| 173 | if (unlikely(intr_status & DMA_STATUS_OVF)) { | 173 | if (unlikely(intr_status & DMA_STATUS_OVF)) { |
| 174 | DBG(INFO, "recv overflow\n"); | 174 | DWMAC_LIB_DBG(KERN_INFO "recv overflow\n"); |
| 175 | x->rx_overflow_irq++; | 175 | x->rx_overflow_irq++; |
| 176 | } | 176 | } |
| 177 | if (unlikely(intr_status & DMA_STATUS_RU)) { | 177 | if (unlikely(intr_status & DMA_STATUS_RU)) { |
| 178 | DBG(INFO, "receive buffer unavailable\n"); | 178 | DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n"); |
| 179 | x->rx_buf_unav_irq++; | 179 | x->rx_buf_unav_irq++; |
| 180 | } | 180 | } |
| 181 | if (unlikely(intr_status & DMA_STATUS_RPS)) { | 181 | if (unlikely(intr_status & DMA_STATUS_RPS)) { |
| 182 | DBG(INFO, "receive process stopped\n"); | 182 | DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n"); |
| 183 | x->rx_process_stopped_irq++; | 183 | x->rx_process_stopped_irq++; |
| 184 | } | 184 | } |
| 185 | if (unlikely(intr_status & DMA_STATUS_RWT)) { | 185 | if (unlikely(intr_status & DMA_STATUS_RWT)) { |
| 186 | DBG(INFO, "receive watchdog\n"); | 186 | DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n"); |
| 187 | x->rx_watchdog_irq++; | 187 | x->rx_watchdog_irq++; |
| 188 | } | 188 | } |
| 189 | if (unlikely(intr_status & DMA_STATUS_ETI)) { | 189 | if (unlikely(intr_status & DMA_STATUS_ETI)) { |
| 190 | DBG(INFO, "transmit early interrupt\n"); | 190 | DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n"); |
| 191 | x->tx_early_irq++; | 191 | x->tx_early_irq++; |
| 192 | } | 192 | } |
| 193 | if (unlikely(intr_status & DMA_STATUS_TPS)) { | 193 | if (unlikely(intr_status & DMA_STATUS_TPS)) { |
| 194 | DBG(INFO, "transmit process stopped\n"); | 194 | DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n"); |
| 195 | x->tx_process_stopped_irq++; | 195 | x->tx_process_stopped_irq++; |
| 196 | ret = tx_hard_error; | 196 | ret = tx_hard_error; |
| 197 | } | 197 | } |
| 198 | if (unlikely(intr_status & DMA_STATUS_FBI)) { | 198 | if (unlikely(intr_status & DMA_STATUS_FBI)) { |
| 199 | DBG(INFO, "fatal bus error\n"); | 199 | DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n"); |
| 200 | x->fatal_bus_error_irq++; | 200 | x->fatal_bus_error_irq++; |
| 201 | ret = tx_hard_error; | 201 | ret = tx_hard_error; |
| 202 | } | 202 | } |
| @@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, | |||
| 215 | /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ | 215 | /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ |
| 216 | writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); | 216 | writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); |
| 217 | 217 | ||
| 218 | DBG(INFO, "\n\n"); | 218 | DWMAC_LIB_DBG(KERN_INFO "\n\n"); |
| 219 | return ret; | 219 | return ret; |
| 220 | } | 220 | } |
| 221 | 221 | ||
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 0e5f03135b50..cc973fc38405 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
| @@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) | |||
| 750 | priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); | 750 | priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); |
| 751 | priv->xstats.threshold = tc; | 751 | priv->xstats.threshold = tc; |
| 752 | } | 752 | } |
| 753 | stmmac_tx_err(priv); | ||
| 754 | } else if (unlikely(status == tx_hard_error)) | 753 | } else if (unlikely(status == tx_hard_error)) |
| 755 | stmmac_tx_err(priv); | 754 | stmmac_tx_err(priv); |
| 756 | } | 755 | } |
| @@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev) | |||
| 781 | 780 | ||
| 782 | stmmac_verify_args(); | 781 | stmmac_verify_args(); |
| 783 | 782 | ||
| 784 | ret = stmmac_init_phy(dev); | ||
| 785 | if (unlikely(ret)) { | ||
| 786 | pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); | ||
| 787 | return ret; | ||
| 788 | } | ||
| 789 | |||
| 790 | /* Request the IRQ lines */ | ||
| 791 | ret = request_irq(dev->irq, stmmac_interrupt, | ||
| 792 | IRQF_SHARED, dev->name, dev); | ||
| 793 | if (unlikely(ret < 0)) { | ||
| 794 | pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", | ||
| 795 | __func__, dev->irq, ret); | ||
| 796 | return ret; | ||
| 797 | } | ||
| 798 | |||
| 799 | #ifdef CONFIG_STMMAC_TIMER | 783 | #ifdef CONFIG_STMMAC_TIMER |
| 800 | priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); | 784 | priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); |
| 801 | if (unlikely(priv->tm == NULL)) { | 785 | if (unlikely(priv->tm == NULL)) { |
| @@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev) | |||
| 814 | } else | 798 | } else |
| 815 | priv->tm->enable = 1; | 799 | priv->tm->enable = 1; |
| 816 | #endif | 800 | #endif |
| 801 | ret = stmmac_init_phy(dev); | ||
| 802 | if (unlikely(ret)) { | ||
| 803 | pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); | ||
| 804 | goto open_error; | ||
| 805 | } | ||
| 817 | 806 | ||
| 818 | /* Create and initialize the TX/RX descriptors chains. */ | 807 | /* Create and initialize the TX/RX descriptors chains. */ |
| 819 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 808 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
| @@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev) | |||
| 822 | init_dma_desc_rings(dev); | 811 | init_dma_desc_rings(dev); |
| 823 | 812 | ||
| 824 | /* DMA initialization and SW reset */ | 813 | /* DMA initialization and SW reset */ |
| 825 | if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, | 814 | ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, |
| 826 | priv->dma_tx_phy, | 815 | priv->dma_tx_phy, priv->dma_rx_phy); |
| 827 | priv->dma_rx_phy) < 0)) { | 816 | if (ret < 0) { |
| 828 | |||
| 829 | pr_err("%s: DMA initialization failed\n", __func__); | 817 | pr_err("%s: DMA initialization failed\n", __func__); |
| 830 | return -1; | 818 | goto open_error; |
| 831 | } | 819 | } |
| 832 | 820 | ||
| 833 | /* Copy the MAC addr into the HW */ | 821 | /* Copy the MAC addr into the HW */ |
| @@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev) | |||
| 848 | writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); | 836 | writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); |
| 849 | writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); | 837 | writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); |
| 850 | 838 | ||
| 839 | /* Request the IRQ lines */ | ||
| 840 | ret = request_irq(dev->irq, stmmac_interrupt, | ||
| 841 | IRQF_SHARED, dev->name, dev); | ||
| 842 | if (unlikely(ret < 0)) { | ||
| 843 | pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", | ||
| 844 | __func__, dev->irq, ret); | ||
| 845 | goto open_error; | ||
| 846 | } | ||
| 847 | |||
| 851 | /* Enable the MAC Rx/Tx */ | 848 | /* Enable the MAC Rx/Tx */ |
| 852 | stmmac_enable_mac(priv->ioaddr); | 849 | stmmac_enable_mac(priv->ioaddr); |
| 853 | 850 | ||
| @@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev) | |||
| 878 | napi_enable(&priv->napi); | 875 | napi_enable(&priv->napi); |
| 879 | skb_queue_head_init(&priv->rx_recycle); | 876 | skb_queue_head_init(&priv->rx_recycle); |
| 880 | netif_start_queue(dev); | 877 | netif_start_queue(dev); |
| 878 | |||
| 881 | return 0; | 879 | return 0; |
| 880 | |||
| 881 | open_error: | ||
| 882 | #ifdef CONFIG_STMMAC_TIMER | ||
| 883 | kfree(priv->tm); | ||
| 884 | #endif | ||
| 885 | if (priv->phydev) | ||
| 886 | phy_disconnect(priv->phydev); | ||
| 887 | |||
| 888 | return ret; | ||
| 882 | } | 889 | } |
| 883 | 890 | ||
| 884 | /** | 891 | /** |
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 8a3b191b195b..ff32befd8443 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
| @@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1251 | /* | 1251 | /* |
| 1252 | * The NIC has told us that a packet has been downloaded onto the card, we must | 1252 | * The NIC has told us that a packet has been downloaded onto the card, we must |
| 1253 | * find out which packet it has done, clear the skb and information for the packet | 1253 | * find out which packet it has done, clear the skb and information for the packet |
| 1254 | * then advance around the ring for all tranmitted packets | 1254 | * then advance around the ring for all transmitted packets |
| 1255 | */ | 1255 | */ |
| 1256 | 1256 | ||
| 1257 | static void xl_dn_comp(struct net_device *dev) | 1257 | static void xl_dn_comp(struct net_device *dev) |
| @@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev) | |||
| 1568 | if (lan_status_diff & LSC_SOFT_ERR) | 1568 | if (lan_status_diff & LSC_SOFT_ERR) |
| 1569 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); | 1569 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); |
| 1570 | if (lan_status_diff & LSC_TRAN_BCN) | 1570 | if (lan_status_diff & LSC_TRAN_BCN) |
| 1571 | printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); | 1571 | printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); |
| 1572 | if (lan_status_diff & LSC_SS) | 1572 | if (lan_status_diff & LSC_SS) |
| 1573 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); | 1573 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); |
| 1574 | if (lan_status_diff & LSC_RING_REC) | 1574 | if (lan_status_diff & LSC_RING_REC) |
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 5bd140704533..9354ca9da576 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c | |||
| @@ -1675,7 +1675,7 @@ drop_frame: | |||
| 1675 | if (lan_status_diff & LSC_SOFT_ERR) | 1675 | if (lan_status_diff & LSC_SOFT_ERR) |
| 1676 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); | 1676 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); |
| 1677 | if (lan_status_diff & LSC_TRAN_BCN) | 1677 | if (lan_status_diff & LSC_TRAN_BCN) |
| 1678 | printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); | 1678 | printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name); |
| 1679 | if (lan_status_diff & LSC_SS) | 1679 | if (lan_status_diff & LSC_SS) |
| 1680 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); | 1680 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); |
| 1681 | if (lan_status_diff & LSC_RING_REC) | 1681 | if (lan_status_diff & LSC_RING_REC) |
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 3d2fbe60b46e..2684003b8ab6 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c | |||
| @@ -1500,7 +1500,7 @@ drop_frame: | |||
| 1500 | if (lan_status_diff & LSC_SOFT_ERR) | 1500 | if (lan_status_diff & LSC_SOFT_ERR) |
| 1501 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); | 1501 | printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); |
| 1502 | if (lan_status_diff & LSC_TRAN_BCN) | 1502 | if (lan_status_diff & LSC_TRAN_BCN) |
| 1503 | printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); | 1503 | printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); |
| 1504 | if (lan_status_diff & LSC_SS) | 1504 | if (lan_status_diff & LSC_SS) |
| 1505 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); | 1505 | printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); |
| 1506 | if (lan_status_diff & LSC_RING_REC) | 1506 | if (lan_status_diff & LSC_RING_REC) |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f1b8af64569c..2d10239ce829 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
| @@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, | |||
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | ret = ath9k_htc_hw_init(hif_dev->htc_handle, | 1042 | ret = ath9k_htc_hw_init(hif_dev->htc_handle, |
| 1043 | &hif_dev->udev->dev, hif_dev->device_id, | 1043 | &interface->dev, hif_dev->device_id, |
| 1044 | hif_dev->udev->product, id->driver_info); | 1044 | hif_dev->udev->product, id->driver_info); |
| 1045 | if (ret) { | 1045 | if (ret) { |
| 1046 | ret = -EINVAL; | 1046 | ret = -EINVAL; |
| @@ -1158,7 +1158,7 @@ fail_resume: | |||
| 1158 | #endif | 1158 | #endif |
| 1159 | 1159 | ||
| 1160 | static struct usb_driver ath9k_hif_usb_driver = { | 1160 | static struct usb_driver ath9k_hif_usb_driver = { |
| 1161 | .name = "ath9k_hif_usb", | 1161 | .name = KBUILD_MODNAME, |
| 1162 | .probe = ath9k_hif_usb_probe, | 1162 | .probe = ath9k_hif_usb_probe, |
| 1163 | .disconnect = ath9k_hif_usb_disconnect, | 1163 | .disconnect = ath9k_hif_usb_disconnect, |
| 1164 | #ifdef CONFIG_PM | 1164 | #ifdef CONFIG_PM |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 1ec9bcd6b281..c95bc5cc1a1f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, | |||
| 1254 | ah->txchainmask = common->tx_chainmask; | 1254 | ah->txchainmask = common->tx_chainmask; |
| 1255 | ah->rxchainmask = common->rx_chainmask; | 1255 | ah->rxchainmask = common->rx_chainmask; |
| 1256 | 1256 | ||
| 1257 | if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) { | ||
| 1258 | ath9k_hw_abortpcurecv(ah); | ||
| 1259 | if (!ath9k_hw_stopdmarecv(ah)) { | ||
| 1260 | ath_dbg(common, ATH_DBG_XMIT, | ||
| 1261 | "Failed to stop receive dma\n"); | ||
| 1262 | bChannelChange = false; | ||
| 1263 | } | ||
| 1264 | } | ||
| 1265 | |||
| 1266 | if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) | 1257 | if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) |
| 1267 | return -EIO; | 1258 | return -EIO; |
| 1268 | 1259 | ||
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 562257ac52cf..edc1cbbfecaf 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
| @@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah) | |||
| 751 | } | 751 | } |
| 752 | EXPORT_SYMBOL(ath9k_hw_abortpcurecv); | 752 | EXPORT_SYMBOL(ath9k_hw_abortpcurecv); |
| 753 | 753 | ||
| 754 | bool ath9k_hw_stopdmarecv(struct ath_hw *ah) | 754 | bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset) |
| 755 | { | 755 | { |
| 756 | #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ | 756 | #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ |
| 757 | #define AH_RX_TIME_QUANTUM 100 /* usec */ | 757 | #define AH_RX_TIME_QUANTUM 100 /* usec */ |
| 758 | struct ath_common *common = ath9k_hw_common(ah); | 758 | struct ath_common *common = ath9k_hw_common(ah); |
| 759 | u32 mac_status, last_mac_status = 0; | ||
| 759 | int i; | 760 | int i; |
| 760 | 761 | ||
| 762 | /* Enable access to the DMA observation bus */ | ||
| 763 | REG_WRITE(ah, AR_MACMISC, | ||
| 764 | ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | | ||
| 765 | (AR_MACMISC_MISC_OBS_BUS_1 << | ||
| 766 | AR_MACMISC_MISC_OBS_BUS_MSB_S))); | ||
| 767 | |||
| 761 | REG_WRITE(ah, AR_CR, AR_CR_RXD); | 768 | REG_WRITE(ah, AR_CR, AR_CR_RXD); |
| 762 | 769 | ||
| 763 | /* Wait for rx enable bit to go low */ | 770 | /* Wait for rx enable bit to go low */ |
| 764 | for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { | 771 | for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { |
| 765 | if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) | 772 | if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) |
| 766 | break; | 773 | break; |
| 774 | |||
| 775 | if (!AR_SREV_9300_20_OR_LATER(ah)) { | ||
| 776 | mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0; | ||
| 777 | if (mac_status == 0x1c0 && mac_status == last_mac_status) { | ||
| 778 | *reset = true; | ||
| 779 | break; | ||
| 780 | } | ||
| 781 | |||
| 782 | last_mac_status = mac_status; | ||
| 783 | } | ||
| 784 | |||
| 767 | udelay(AH_TIME_QUANTUM); | 785 | udelay(AH_TIME_QUANTUM); |
| 768 | } | 786 | } |
| 769 | 787 | ||
| 770 | if (i == 0) { | 788 | if (i == 0) { |
| 771 | ath_err(common, | 789 | ath_err(common, |
| 772 | "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", | 790 | "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n", |
| 773 | AH_RX_STOP_DMA_TIMEOUT / 1000, | 791 | AH_RX_STOP_DMA_TIMEOUT / 1000, |
| 774 | REG_READ(ah, AR_CR), | 792 | REG_READ(ah, AR_CR), |
| 775 | REG_READ(ah, AR_DIAG_SW)); | 793 | REG_READ(ah, AR_DIAG_SW), |
| 794 | REG_READ(ah, AR_DMADBG_7)); | ||
| 776 | return false; | 795 | return false; |
| 777 | } else { | 796 | } else { |
| 778 | return true; | 797 | return true; |
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index b2b2ff852c32..c2a59386fb9c 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h | |||
| @@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); | |||
| 695 | void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); | 695 | void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); |
| 696 | void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); | 696 | void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); |
| 697 | void ath9k_hw_abortpcurecv(struct ath_hw *ah); | 697 | void ath9k_hw_abortpcurecv(struct ath_hw *ah); |
| 698 | bool ath9k_hw_stopdmarecv(struct ath_hw *ah); | 698 | bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset); |
| 699 | int ath9k_hw_beaconq_setup(struct ath_hw *ah); | 699 | int ath9k_hw_beaconq_setup(struct ath_hw *ah); |
| 700 | 700 | ||
| 701 | /* Interrupt Handling */ | 701 | /* Interrupt Handling */ |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index dddb85de622d..17d04ff8d678 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw, | |||
| 1376 | 1376 | ||
| 1377 | ath9k_calculate_iter_data(hw, vif, &iter_data); | 1377 | ath9k_calculate_iter_data(hw, vif, &iter_data); |
| 1378 | 1378 | ||
| 1379 | ath9k_ps_wakeup(sc); | ||
| 1380 | /* Set BSSID mask. */ | 1379 | /* Set BSSID mask. */ |
| 1381 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); | 1380 | memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); |
| 1382 | ath_hw_setbssidmask(common); | 1381 | ath_hw_setbssidmask(common); |
| @@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw, | |||
| 1411 | } | 1410 | } |
| 1412 | 1411 | ||
| 1413 | ath9k_hw_set_interrupts(ah, ah->imask); | 1412 | ath9k_hw_set_interrupts(ah, ah->imask); |
| 1414 | ath9k_ps_restore(sc); | ||
| 1415 | 1413 | ||
| 1416 | /* Set up ANI */ | 1414 | /* Set up ANI */ |
| 1417 | if ((iter_data.naps + iter_data.nadhocs) > 0) { | 1415 | if ((iter_data.naps + iter_data.nadhocs) > 0) { |
| @@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1457 | struct ath_vif *avp = (void *)vif->drv_priv; | 1455 | struct ath_vif *avp = (void *)vif->drv_priv; |
| 1458 | int ret = 0; | 1456 | int ret = 0; |
| 1459 | 1457 | ||
| 1458 | ath9k_ps_wakeup(sc); | ||
| 1460 | mutex_lock(&sc->mutex); | 1459 | mutex_lock(&sc->mutex); |
| 1461 | 1460 | ||
| 1462 | switch (vif->type) { | 1461 | switch (vif->type) { |
| @@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1503 | ath9k_do_vif_add_setup(hw, vif); | 1502 | ath9k_do_vif_add_setup(hw, vif); |
| 1504 | out: | 1503 | out: |
| 1505 | mutex_unlock(&sc->mutex); | 1504 | mutex_unlock(&sc->mutex); |
| 1505 | ath9k_ps_restore(sc); | ||
| 1506 | return ret; | 1506 | return ret; |
| 1507 | } | 1507 | } |
| 1508 | 1508 | ||
| @@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, | |||
| 1517 | 1517 | ||
| 1518 | ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); | 1518 | ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); |
| 1519 | mutex_lock(&sc->mutex); | 1519 | mutex_lock(&sc->mutex); |
| 1520 | ath9k_ps_wakeup(sc); | ||
| 1520 | 1521 | ||
| 1521 | /* See if new interface type is valid. */ | 1522 | /* See if new interface type is valid. */ |
| 1522 | if ((new_type == NL80211_IFTYPE_ADHOC) && | 1523 | if ((new_type == NL80211_IFTYPE_ADHOC) && |
| @@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, | |||
| 1546 | 1547 | ||
| 1547 | ath9k_do_vif_add_setup(hw, vif); | 1548 | ath9k_do_vif_add_setup(hw, vif); |
| 1548 | out: | 1549 | out: |
| 1550 | ath9k_ps_restore(sc); | ||
| 1549 | mutex_unlock(&sc->mutex); | 1551 | mutex_unlock(&sc->mutex); |
| 1550 | return ret; | 1552 | return ret; |
| 1551 | } | 1553 | } |
| @@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, | |||
| 1558 | 1560 | ||
| 1559 | ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); | 1561 | ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); |
| 1560 | 1562 | ||
| 1563 | ath9k_ps_wakeup(sc); | ||
| 1561 | mutex_lock(&sc->mutex); | 1564 | mutex_lock(&sc->mutex); |
| 1562 | 1565 | ||
| 1563 | sc->nvifs--; | 1566 | sc->nvifs--; |
| @@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, | |||
| 1569 | ath9k_calculate_summary_state(hw, NULL); | 1572 | ath9k_calculate_summary_state(hw, NULL); |
| 1570 | 1573 | ||
| 1571 | mutex_unlock(&sc->mutex); | 1574 | mutex_unlock(&sc->mutex); |
| 1575 | ath9k_ps_restore(sc); | ||
| 1572 | } | 1576 | } |
| 1573 | 1577 | ||
| 1574 | static void ath9k_enable_ps(struct ath_softc *sc) | 1578 | static void ath9k_enable_ps(struct ath_softc *sc) |
| @@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, | |||
| 1809 | 1813 | ||
| 1810 | txq = sc->tx.txq_map[queue]; | 1814 | txq = sc->tx.txq_map[queue]; |
| 1811 | 1815 | ||
| 1816 | ath9k_ps_wakeup(sc); | ||
| 1812 | mutex_lock(&sc->mutex); | 1817 | mutex_lock(&sc->mutex); |
| 1813 | 1818 | ||
| 1814 | memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); | 1819 | memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); |
| @@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, | |||
| 1832 | ath_beaconq_config(sc); | 1837 | ath_beaconq_config(sc); |
| 1833 | 1838 | ||
| 1834 | mutex_unlock(&sc->mutex); | 1839 | mutex_unlock(&sc->mutex); |
| 1840 | ath9k_ps_restore(sc); | ||
| 1835 | 1841 | ||
| 1836 | return ret; | 1842 | return ret; |
| 1837 | } | 1843 | } |
| @@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, | |||
| 1894 | int slottime; | 1900 | int slottime; |
| 1895 | int error; | 1901 | int error; |
| 1896 | 1902 | ||
| 1903 | ath9k_ps_wakeup(sc); | ||
| 1897 | mutex_lock(&sc->mutex); | 1904 | mutex_lock(&sc->mutex); |
| 1898 | 1905 | ||
| 1899 | if (changed & BSS_CHANGED_BSSID) { | 1906 | if (changed & BSS_CHANGED_BSSID) { |
| @@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, | |||
| 1994 | } | 2001 | } |
| 1995 | 2002 | ||
| 1996 | mutex_unlock(&sc->mutex); | 2003 | mutex_unlock(&sc->mutex); |
| 2004 | ath9k_ps_restore(sc); | ||
| 1997 | } | 2005 | } |
| 1998 | 2006 | ||
| 1999 | static u64 ath9k_get_tsf(struct ieee80211_hw *hw) | 2007 | static u64 ath9k_get_tsf(struct ieee80211_hw *hw) |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index a9c3f4672aa0..dcd19bc337d1 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
| @@ -486,12 +486,12 @@ start_recv: | |||
| 486 | bool ath_stoprecv(struct ath_softc *sc) | 486 | bool ath_stoprecv(struct ath_softc *sc) |
| 487 | { | 487 | { |
| 488 | struct ath_hw *ah = sc->sc_ah; | 488 | struct ath_hw *ah = sc->sc_ah; |
| 489 | bool stopped; | 489 | bool stopped, reset = false; |
| 490 | 490 | ||
| 491 | spin_lock_bh(&sc->rx.rxbuflock); | 491 | spin_lock_bh(&sc->rx.rxbuflock); |
| 492 | ath9k_hw_abortpcurecv(ah); | 492 | ath9k_hw_abortpcurecv(ah); |
| 493 | ath9k_hw_setrxfilter(ah, 0); | 493 | ath9k_hw_setrxfilter(ah, 0); |
| 494 | stopped = ath9k_hw_stopdmarecv(ah); | 494 | stopped = ath9k_hw_stopdmarecv(ah, &reset); |
| 495 | 495 | ||
| 496 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) | 496 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) |
| 497 | ath_edma_stop_recv(sc); | 497 | ath_edma_stop_recv(sc); |
| @@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc) | |||
| 506 | "confusing the DMA engine when we start RX up\n"); | 506 | "confusing the DMA engine when we start RX up\n"); |
| 507 | ATH_DBG_WARN_ON_ONCE(!stopped); | 507 | ATH_DBG_WARN_ON_ONCE(!stopped); |
| 508 | } | 508 | } |
| 509 | return stopped; | 509 | return stopped || reset; |
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | void ath_flushrecv(struct ath_softc *sc) | 512 | void ath_flushrecv(struct ath_softc *sc) |
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 248c670fdfbe..5c2cfe694152 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h | |||
| @@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { | |||
| 195 | {APL9_WORLD, CTL_ETSI, CTL_ETSI}, | 195 | {APL9_WORLD, CTL_ETSI, CTL_ETSI}, |
| 196 | 196 | ||
| 197 | {APL3_FCCA, CTL_FCC, CTL_FCC}, | 197 | {APL3_FCCA, CTL_FCC, CTL_FCC}, |
| 198 | {APL7_FCCA, CTL_FCC, CTL_FCC}, | ||
| 198 | {APL1_ETSIC, CTL_FCC, CTL_ETSI}, | 199 | {APL1_ETSIC, CTL_FCC, CTL_ETSI}, |
| 199 | {APL2_ETSIC, CTL_FCC, CTL_ETSI}, | 200 | {APL2_ETSIC, CTL_FCC, CTL_ETSI}, |
| 200 | {APL2_APLD, CTL_FCC, NO_CTL}, | 201 | {APL2_APLD, CTL_FCC, NO_CTL}, |
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig index 2a45dd44cc12..aef65cd47661 100644 --- a/drivers/net/wireless/iwlegacy/Kconfig +++ b/drivers/net/wireless/iwlegacy/Kconfig | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | config IWLWIFI_LEGACY | 1 | config IWLWIFI_LEGACY |
| 2 | tristate "Intel Wireless Wifi legacy devices" | 2 | tristate |
| 3 | depends on PCI && MAC80211 | ||
| 4 | select FW_LOADER | 3 | select FW_LOADER |
| 5 | select NEW_LEDS | 4 | select NEW_LEDS |
| 6 | select LEDS_CLASS | 5 | select LEDS_CLASS |
| @@ -65,7 +64,8 @@ endmenu | |||
| 65 | 64 | ||
| 66 | config IWL4965 | 65 | config IWL4965 |
| 67 | tristate "Intel Wireless WiFi 4965AGN (iwl4965)" | 66 | tristate "Intel Wireless WiFi 4965AGN (iwl4965)" |
| 68 | depends on IWLWIFI_LEGACY | 67 | depends on PCI && MAC80211 |
| 68 | select IWLWIFI_LEGACY | ||
| 69 | ---help--- | 69 | ---help--- |
| 70 | This option enables support for | 70 | This option enables support for |
| 71 | 71 | ||
| @@ -92,7 +92,8 @@ config IWL4965 | |||
| 92 | 92 | ||
| 93 | config IWL3945 | 93 | config IWL3945 |
| 94 | tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" | 94 | tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" |
| 95 | depends on IWLWIFI_LEGACY | 95 | depends on PCI && MAC80211 |
| 96 | select IWLWIFI_LEGACY | ||
| 96 | ---help--- | 97 | ---help--- |
| 97 | Select to build the driver supporting the: | 98 | Select to build the driver supporting the: |
| 98 | 99 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h index 779d3cb86e2c..5c3a68d3af12 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h | |||
| @@ -74,8 +74,6 @@ | |||
| 74 | /* RSSI to dBm */ | 74 | /* RSSI to dBm */ |
| 75 | #define IWL39_RSSI_OFFSET 95 | 75 | #define IWL39_RSSI_OFFSET 95 |
| 76 | 76 | ||
| 77 | #define IWL_DEFAULT_TX_POWER 0x0F | ||
| 78 | |||
| 79 | /* | 77 | /* |
| 80 | * EEPROM related constants, enums, and structures. | 78 | * EEPROM related constants, enums, and structures. |
| 81 | */ | 79 | */ |
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h index 08b189c8472d..fc6fa2886d9c 100644 --- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h +++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h | |||
| @@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl { | |||
| 804 | 804 | ||
| 805 | #define IWL4965_DEFAULT_TX_RETRY 15 | 805 | #define IWL4965_DEFAULT_TX_RETRY 15 |
| 806 | 806 | ||
| 807 | /* Limit range of txpower output target to be between these values */ | ||
| 808 | #define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ | ||
| 809 | |||
| 810 | /* EEPROM */ | 807 | /* EEPROM */ |
| 811 | #define IWL4965_FIRST_AMPDU_QUEUE 10 | 808 | #define IWL4965_FIRST_AMPDU_QUEUE 10 |
| 812 | 809 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 7007d61bb6b5..c1511b14b239 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
| @@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv) | |||
| 160 | struct ieee80211_channel *geo_ch; | 160 | struct ieee80211_channel *geo_ch; |
| 161 | struct ieee80211_rate *rates; | 161 | struct ieee80211_rate *rates; |
| 162 | int i = 0; | 162 | int i = 0; |
| 163 | s8 max_tx_power = 0; | ||
| 163 | 164 | ||
| 164 | if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || | 165 | if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || |
| 165 | priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { | 166 | priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { |
| @@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv) | |||
| 235 | 236 | ||
| 236 | geo_ch->flags |= ch->ht40_extension_channel; | 237 | geo_ch->flags |= ch->ht40_extension_channel; |
| 237 | 238 | ||
| 238 | if (ch->max_power_avg > priv->tx_power_device_lmt) | 239 | if (ch->max_power_avg > max_tx_power) |
| 239 | priv->tx_power_device_lmt = ch->max_power_avg; | 240 | max_tx_power = ch->max_power_avg; |
| 240 | } else { | 241 | } else { |
| 241 | geo_ch->flags |= IEEE80211_CHAN_DISABLED; | 242 | geo_ch->flags |= IEEE80211_CHAN_DISABLED; |
| 242 | } | 243 | } |
| @@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv) | |||
| 249 | geo_ch->flags); | 250 | geo_ch->flags); |
| 250 | } | 251 | } |
| 251 | 252 | ||
| 253 | priv->tx_power_device_lmt = max_tx_power; | ||
| 254 | priv->tx_power_user_lmt = max_tx_power; | ||
| 255 | priv->tx_power_next = max_tx_power; | ||
| 256 | |||
| 252 | if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && | 257 | if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && |
| 253 | priv->cfg->sku & IWL_SKU_A) { | 258 | priv->cfg->sku & IWL_SKU_A) { |
| 254 | IWL_INFO(priv, "Incorrectly detected BG card as ABG. " | 259 | IWL_INFO(priv, "Incorrectly detected BG card as ABG. " |
| @@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) | |||
| 1124 | if (!priv->cfg->ops->lib->send_tx_power) | 1129 | if (!priv->cfg->ops->lib->send_tx_power) |
| 1125 | return -EOPNOTSUPP; | 1130 | return -EOPNOTSUPP; |
| 1126 | 1131 | ||
| 1127 | if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { | 1132 | /* 0 dBm mean 1 milliwatt */ |
| 1133 | if (tx_power < 0) { | ||
| 1128 | IWL_WARN(priv, | 1134 | IWL_WARN(priv, |
| 1129 | "Requested user TXPOWER %d below lower limit %d.\n", | 1135 | "Requested user TXPOWER %d below 1 mW.\n", |
| 1130 | tx_power, | 1136 | tx_power); |
| 1131 | IWL4965_TX_POWER_TARGET_POWER_MIN); | ||
| 1132 | return -EINVAL; | 1137 | return -EINVAL; |
| 1133 | } | 1138 | } |
| 1134 | 1139 | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c index 04c5648027df..cb346d1a9ffa 100644 --- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c +++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c | |||
| @@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv) | |||
| 471 | flags & EEPROM_CHANNEL_RADAR)) | 471 | flags & EEPROM_CHANNEL_RADAR)) |
| 472 | ? "" : "not "); | 472 | ? "" : "not "); |
| 473 | 473 | ||
| 474 | /* Set the tx_power_user_lmt to the highest power | ||
| 475 | * supported by any channel */ | ||
| 476 | if (eeprom_ch_info[ch].max_power_avg > | ||
| 477 | priv->tx_power_user_lmt) | ||
| 478 | priv->tx_power_user_lmt = | ||
| 479 | eeprom_ch_info[ch].max_power_avg; | ||
| 480 | |||
| 481 | ch_info++; | 474 | ch_info++; |
| 482 | } | 475 | } |
| 483 | } | 476 | } |
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 28eb3d885ba1..cc7ebcee60e5 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c | |||
| @@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv) | |||
| 3825 | priv->force_reset[IWL_FW_RESET].reset_duration = | 3825 | priv->force_reset[IWL_FW_RESET].reset_duration = |
| 3826 | IWL_DELAY_NEXT_FORCE_FW_RELOAD; | 3826 | IWL_DELAY_NEXT_FORCE_FW_RELOAD; |
| 3827 | 3827 | ||
| 3828 | |||
| 3829 | priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; | ||
| 3830 | priv->tx_power_next = IWL_DEFAULT_TX_POWER; | ||
| 3831 | |||
| 3832 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { | 3828 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { |
| 3833 | IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", | 3829 | IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", |
| 3834 | eeprom->version); | 3830 | eeprom->version); |
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 91b3d8b9d7a5..d484c3678163 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c | |||
| @@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv) | |||
| 3140 | 3140 | ||
| 3141 | iwl_legacy_init_scan_params(priv); | 3141 | iwl_legacy_init_scan_params(priv); |
| 3142 | 3142 | ||
| 3143 | /* Set the tx_power_user_lmt to the lowest power level | ||
| 3144 | * this value will get overwritten by channel max power avg | ||
| 3145 | * from eeprom */ | ||
| 3146 | priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN; | ||
| 3147 | priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN; | ||
| 3148 | |||
| 3149 | ret = iwl_legacy_init_channel_map(priv); | 3143 | ret = iwl_legacy_init_channel_map(priv); |
| 3150 | if (ret) { | 3144 | if (ret) { |
| 3151 | IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); | 3145 | IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 3ea31b659d1a..22e045b5bcee 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
| @@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = { | |||
| 530 | struct iwl_cfg iwl5300_agn_cfg = { | 530 | struct iwl_cfg iwl5300_agn_cfg = { |
| 531 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", | 531 | .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", |
| 532 | IWL_DEVICE_5000, | 532 | IWL_DEVICE_5000, |
| 533 | /* at least EEPROM 0x11A has wrong info */ | ||
| 534 | .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ | ||
| 535 | .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ | ||
| 533 | .ht_params = &iwl5000_ht_params, | 536 | .ht_params = &iwl5000_ht_params, |
| 534 | }; | 537 | }; |
| 535 | 538 | ||
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 36952274950e..c1ceb4b23971 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
| @@ -137,6 +137,7 @@ struct mwl8k_tx_queue { | |||
| 137 | struct mwl8k_priv { | 137 | struct mwl8k_priv { |
| 138 | struct ieee80211_hw *hw; | 138 | struct ieee80211_hw *hw; |
| 139 | struct pci_dev *pdev; | 139 | struct pci_dev *pdev; |
| 140 | int irq; | ||
| 140 | 141 | ||
| 141 | struct mwl8k_device_info *device_info; | 142 | struct mwl8k_device_info *device_info; |
| 142 | 143 | ||
| @@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw) | |||
| 3761 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, | 3762 | rc = request_irq(priv->pdev->irq, mwl8k_interrupt, |
| 3762 | IRQF_SHARED, MWL8K_NAME, hw); | 3763 | IRQF_SHARED, MWL8K_NAME, hw); |
| 3763 | if (rc) { | 3764 | if (rc) { |
| 3765 | priv->irq = -1; | ||
| 3764 | wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); | 3766 | wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); |
| 3765 | return -EIO; | 3767 | return -EIO; |
| 3766 | } | 3768 | } |
| 3769 | priv->irq = priv->pdev->irq; | ||
| 3767 | 3770 | ||
| 3768 | /* Enable TX reclaim and RX tasklets. */ | 3771 | /* Enable TX reclaim and RX tasklets. */ |
| 3769 | tasklet_enable(&priv->poll_tx_task); | 3772 | tasklet_enable(&priv->poll_tx_task); |
| @@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw) | |||
| 3800 | if (rc) { | 3803 | if (rc) { |
| 3801 | iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); | 3804 | iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); |
| 3802 | free_irq(priv->pdev->irq, hw); | 3805 | free_irq(priv->pdev->irq, hw); |
| 3806 | priv->irq = -1; | ||
| 3803 | tasklet_disable(&priv->poll_tx_task); | 3807 | tasklet_disable(&priv->poll_tx_task); |
| 3804 | tasklet_disable(&priv->poll_rx_task); | 3808 | tasklet_disable(&priv->poll_rx_task); |
| 3805 | } | 3809 | } |
| @@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw) | |||
| 3818 | 3822 | ||
| 3819 | /* Disable interrupts */ | 3823 | /* Disable interrupts */ |
| 3820 | iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); | 3824 | iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); |
| 3821 | free_irq(priv->pdev->irq, hw); | 3825 | if (priv->irq != -1) { |
| 3826 | free_irq(priv->pdev->irq, hw); | ||
| 3827 | priv->irq = -1; | ||
| 3828 | } | ||
| 3822 | 3829 | ||
| 3823 | /* Stop finalize join worker */ | 3830 | /* Stop finalize join worker */ |
| 3824 | cancel_work_sync(&priv->finalize_join_worker); | 3831 | cancel_work_sync(&priv->finalize_join_worker); |
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 7834c26c2954..042842e704de 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c | |||
| @@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) | |||
| 703 | struct p54_tx_info *p54info; | 703 | struct p54_tx_info *p54info; |
| 704 | struct p54_hdr *hdr; | 704 | struct p54_hdr *hdr; |
| 705 | struct p54_tx_data *txhdr; | 705 | struct p54_tx_data *txhdr; |
| 706 | unsigned int padding, len, extra_len; | 706 | unsigned int padding, len, extra_len = 0; |
| 707 | int i, j, ridx; | 707 | int i, j, ridx; |
| 708 | u16 hdr_flags = 0, aid = 0; | 708 | u16 hdr_flags = 0, aid = 0; |
| 709 | u8 rate, queue = 0, crypt_offset = 0; | 709 | u8 rate, queue = 0, crypt_offset = 0; |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 505c1c7075f0..d552d2c77844 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
| @@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain, | |||
| 1299 | static struct iova_domain reserved_iova_list; | 1299 | static struct iova_domain reserved_iova_list; |
| 1300 | static struct lock_class_key reserved_rbtree_key; | 1300 | static struct lock_class_key reserved_rbtree_key; |
| 1301 | 1301 | ||
| 1302 | static void dmar_init_reserved_ranges(void) | 1302 | static int dmar_init_reserved_ranges(void) |
| 1303 | { | 1303 | { |
| 1304 | struct pci_dev *pdev = NULL; | 1304 | struct pci_dev *pdev = NULL; |
| 1305 | struct iova *iova; | 1305 | struct iova *iova; |
| @@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void) | |||
| 1313 | /* IOAPIC ranges shouldn't be accessed by DMA */ | 1313 | /* IOAPIC ranges shouldn't be accessed by DMA */ |
| 1314 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), | 1314 | iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), |
| 1315 | IOVA_PFN(IOAPIC_RANGE_END)); | 1315 | IOVA_PFN(IOAPIC_RANGE_END)); |
| 1316 | if (!iova) | 1316 | if (!iova) { |
| 1317 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); | 1317 | printk(KERN_ERR "Reserve IOAPIC range failed\n"); |
| 1318 | return -ENODEV; | ||
| 1319 | } | ||
| 1318 | 1320 | ||
| 1319 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ | 1321 | /* Reserve all PCI MMIO to avoid peer-to-peer access */ |
| 1320 | for_each_pci_dev(pdev) { | 1322 | for_each_pci_dev(pdev) { |
| @@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void) | |||
| 1327 | iova = reserve_iova(&reserved_iova_list, | 1329 | iova = reserve_iova(&reserved_iova_list, |
| 1328 | IOVA_PFN(r->start), | 1330 | IOVA_PFN(r->start), |
| 1329 | IOVA_PFN(r->end)); | 1331 | IOVA_PFN(r->end)); |
| 1330 | if (!iova) | 1332 | if (!iova) { |
| 1331 | printk(KERN_ERR "Reserve iova failed\n"); | 1333 | printk(KERN_ERR "Reserve iova failed\n"); |
| 1334 | return -ENODEV; | ||
| 1335 | } | ||
| 1332 | } | 1336 | } |
| 1333 | } | 1337 | } |
| 1334 | 1338 | return 0; | |
| 1335 | } | 1339 | } |
| 1336 | 1340 | ||
| 1337 | static void domain_reserve_special_ranges(struct dmar_domain *domain) | 1341 | static void domain_reserve_special_ranges(struct dmar_domain *domain) |
| @@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | |||
| 1835 | 1839 | ||
| 1836 | ret = iommu_attach_domain(domain, iommu); | 1840 | ret = iommu_attach_domain(domain, iommu); |
| 1837 | if (ret) { | 1841 | if (ret) { |
| 1838 | domain_exit(domain); | 1842 | free_domain_mem(domain); |
| 1839 | goto error; | 1843 | goto error; |
| 1840 | } | 1844 | } |
| 1841 | 1845 | ||
| @@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw) | |||
| 2213 | return 0; | 2217 | return 0; |
| 2214 | } | 2218 | } |
| 2215 | 2219 | ||
| 2216 | int __init init_dmars(void) | 2220 | static int __init init_dmars(int force_on) |
| 2217 | { | 2221 | { |
| 2218 | struct dmar_drhd_unit *drhd; | 2222 | struct dmar_drhd_unit *drhd; |
| 2219 | struct dmar_rmrr_unit *rmrr; | 2223 | struct dmar_rmrr_unit *rmrr; |
| @@ -2393,8 +2397,15 @@ int __init init_dmars(void) | |||
| 2393 | * enable translation | 2397 | * enable translation |
| 2394 | */ | 2398 | */ |
| 2395 | for_each_drhd_unit(drhd) { | 2399 | for_each_drhd_unit(drhd) { |
| 2396 | if (drhd->ignored) | 2400 | if (drhd->ignored) { |
| 2401 | /* | ||
| 2402 | * we always have to disable PMRs or DMA may fail on | ||
| 2403 | * this device | ||
| 2404 | */ | ||
| 2405 | if (force_on) | ||
| 2406 | iommu_disable_protect_mem_regions(drhd->iommu); | ||
| 2397 | continue; | 2407 | continue; |
| 2408 | } | ||
| 2398 | iommu = drhd->iommu; | 2409 | iommu = drhd->iommu; |
| 2399 | 2410 | ||
| 2400 | iommu_flush_write_buffer(iommu); | 2411 | iommu_flush_write_buffer(iommu); |
| @@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb, | |||
| 3240 | if (!domain) | 3251 | if (!domain) |
| 3241 | return 0; | 3252 | return 0; |
| 3242 | 3253 | ||
| 3243 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) | 3254 | if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { |
| 3244 | domain_remove_one_dev_info(domain, pdev); | 3255 | domain_remove_one_dev_info(domain, pdev); |
| 3245 | 3256 | ||
| 3257 | if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && | ||
| 3258 | !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && | ||
| 3259 | list_empty(&domain->devices)) | ||
| 3260 | domain_exit(domain); | ||
| 3261 | } | ||
| 3262 | |||
| 3246 | return 0; | 3263 | return 0; |
| 3247 | } | 3264 | } |
| 3248 | 3265 | ||
| @@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void) | |||
| 3277 | if (no_iommu || dmar_disabled) | 3294 | if (no_iommu || dmar_disabled) |
| 3278 | return -ENODEV; | 3295 | return -ENODEV; |
| 3279 | 3296 | ||
| 3280 | iommu_init_mempool(); | 3297 | if (iommu_init_mempool()) { |
| 3281 | dmar_init_reserved_ranges(); | 3298 | if (force_on) |
| 3299 | panic("tboot: Failed to initialize iommu memory\n"); | ||
| 3300 | return -ENODEV; | ||
| 3301 | } | ||
| 3302 | |||
| 3303 | if (dmar_init_reserved_ranges()) { | ||
| 3304 | if (force_on) | ||
| 3305 | panic("tboot: Failed to reserve iommu ranges\n"); | ||
| 3306 | return -ENODEV; | ||
| 3307 | } | ||
| 3282 | 3308 | ||
| 3283 | init_no_remapping_devices(); | 3309 | init_no_remapping_devices(); |
| 3284 | 3310 | ||
| 3285 | ret = init_dmars(); | 3311 | ret = init_dmars(force_on); |
| 3286 | if (ret) { | 3312 | if (ret) { |
| 3287 | if (force_on) | 3313 | if (force_on) |
| 3288 | panic("tboot: Failed to initialize DMARs\n"); | 3314 | panic("tboot: Failed to initialize DMARs\n"); |
| @@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
| 3391 | domain->iommu_count--; | 3417 | domain->iommu_count--; |
| 3392 | domain_update_iommu_cap(domain); | 3418 | domain_update_iommu_cap(domain); |
| 3393 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); | 3419 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); |
| 3420 | |||
| 3421 | spin_lock_irqsave(&iommu->lock, tmp_flags); | ||
| 3422 | clear_bit(domain->id, iommu->domain_ids); | ||
| 3423 | iommu->domains[domain->id] = NULL; | ||
| 3424 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); | ||
| 3394 | } | 3425 | } |
| 3395 | 3426 | ||
| 3396 | spin_unlock_irqrestore(&device_domain_lock, flags); | 3427 | spin_unlock_irqrestore(&device_domain_lock, flags); |
| @@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, | |||
| 3607 | 3638 | ||
| 3608 | pte = dmar_domain->pgd; | 3639 | pte = dmar_domain->pgd; |
| 3609 | if (dma_pte_present(pte)) { | 3640 | if (dma_pte_present(pte)) { |
| 3610 | free_pgtable_page(dmar_domain->pgd); | ||
| 3611 | dmar_domain->pgd = (struct dma_pte *) | 3641 | dmar_domain->pgd = (struct dma_pte *) |
| 3612 | phys_to_virt(dma_pte_addr(pte)); | 3642 | phys_to_virt(dma_pte_addr(pte)); |
| 3643 | free_pgtable_page(pte); | ||
| 3613 | } | 3644 | } |
| 3614 | dmar_domain->agaw--; | 3645 | dmar_domain->agaw--; |
| 3615 | } | 3646 | } |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ab55c2fa7ce2..e9901b8f8443 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q) | |||
| 411 | list_splice_init(&shost->starved_list, &starved_list); | 411 | list_splice_init(&shost->starved_list, &starved_list); |
| 412 | 412 | ||
| 413 | while (!list_empty(&starved_list)) { | 413 | while (!list_empty(&starved_list)) { |
| 414 | int flagset; | ||
| 415 | |||
| 416 | /* | 414 | /* |
| 417 | * As long as shost is accepting commands and we have | 415 | * As long as shost is accepting commands and we have |
| 418 | * starved queues, call blk_run_queue. scsi_request_fn | 416 | * starved queues, call blk_run_queue. scsi_request_fn |
| @@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q) | |||
| 435 | continue; | 433 | continue; |
| 436 | } | 434 | } |
| 437 | 435 | ||
| 438 | spin_unlock(shost->host_lock); | 436 | blk_run_queue_async(sdev->request_queue); |
| 439 | |||
| 440 | spin_lock(sdev->request_queue->queue_lock); | ||
| 441 | flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && | ||
| 442 | !test_bit(QUEUE_FLAG_REENTER, | ||
| 443 | &sdev->request_queue->queue_flags); | ||
| 444 | if (flagset) | ||
| 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); | ||
| 446 | __blk_run_queue(sdev->request_queue); | ||
| 447 | if (flagset) | ||
| 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); | ||
| 449 | spin_unlock(sdev->request_queue->queue_lock); | ||
| 450 | |||
| 451 | spin_lock(shost->host_lock); | ||
| 452 | } | 437 | } |
| 453 | /* put any unprocessed entries back */ | 438 | /* put any unprocessed entries back */ |
| 454 | list_splice(&starved_list, &shost->starved_list); | 439 | list_splice(&starved_list, &shost->starved_list); |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 28c33506e4ad..815069d13f9b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -3816,28 +3816,17 @@ fail_host_msg: | |||
| 3816 | static void | 3816 | static void |
| 3817 | fc_bsg_goose_queue(struct fc_rport *rport) | 3817 | fc_bsg_goose_queue(struct fc_rport *rport) |
| 3818 | { | 3818 | { |
| 3819 | int flagset; | ||
| 3820 | unsigned long flags; | ||
| 3821 | |||
| 3822 | if (!rport->rqst_q) | 3819 | if (!rport->rqst_q) |
| 3823 | return; | 3820 | return; |
| 3824 | 3821 | ||
| 3822 | /* | ||
| 3823 | * This get/put dance makes no sense | ||
| 3824 | */ | ||
| 3825 | get_device(&rport->dev); | 3825 | get_device(&rport->dev); |
| 3826 | 3826 | blk_run_queue_async(rport->rqst_q); | |
| 3827 | spin_lock_irqsave(rport->rqst_q->queue_lock, flags); | ||
| 3828 | flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && | ||
| 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | ||
| 3830 | if (flagset) | ||
| 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); | ||
| 3832 | __blk_run_queue(rport->rqst_q); | ||
| 3833 | if (flagset) | ||
| 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | ||
| 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); | ||
| 3836 | |||
| 3837 | put_device(&rport->dev); | 3827 | put_device(&rport->dev); |
| 3838 | } | 3828 | } |
| 3839 | 3829 | ||
| 3840 | |||
| 3841 | /** | 3830 | /** |
| 3842 | * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD | 3831 | * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD |
| 3843 | * @q: rport request queue | 3832 | * @q: rport request queue |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 4fb5b2bf2348..4bcc8b82640b 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
| @@ -590,15 +590,10 @@ static struct virtio_config_ops virtio_pci_config_ops = { | |||
| 590 | 590 | ||
| 591 | static void virtio_pci_release_dev(struct device *_d) | 591 | static void virtio_pci_release_dev(struct device *_d) |
| 592 | { | 592 | { |
| 593 | struct virtio_device *dev = container_of(_d, struct virtio_device, dev); | 593 | struct virtio_device *dev = container_of(_d, struct virtio_device, |
| 594 | dev); | ||
| 594 | struct virtio_pci_device *vp_dev = to_vp_device(dev); | 595 | struct virtio_pci_device *vp_dev = to_vp_device(dev); |
| 595 | struct pci_dev *pci_dev = vp_dev->pci_dev; | ||
| 596 | 596 | ||
| 597 | vp_del_vqs(dev); | ||
| 598 | pci_set_drvdata(pci_dev, NULL); | ||
| 599 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
| 600 | pci_release_regions(pci_dev); | ||
| 601 | pci_disable_device(pci_dev); | ||
| 602 | kfree(vp_dev); | 597 | kfree(vp_dev); |
| 603 | } | 598 | } |
| 604 | 599 | ||
| @@ -681,6 +676,12 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) | |||
| 681 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | 676 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); |
| 682 | 677 | ||
| 683 | unregister_virtio_device(&vp_dev->vdev); | 678 | unregister_virtio_device(&vp_dev->vdev); |
| 679 | |||
| 680 | vp_del_vqs(&vp_dev->vdev); | ||
| 681 | pci_set_drvdata(pci_dev, NULL); | ||
| 682 | pci_iounmap(pci_dev, vp_dev->ioaddr); | ||
| 683 | pci_release_regions(pci_dev); | ||
| 684 | pci_disable_device(pci_dev); | ||
| 684 | } | 685 | } |
| 685 | 686 | ||
| 686 | #ifdef CONFIG_PM | 687 | #ifdef CONFIG_PM |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cc2f73e03475..b0043fb26a4d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
| @@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) | |||
| 371 | /* detach_buf clears data, so grab it now. */ | 371 | /* detach_buf clears data, so grab it now. */ |
| 372 | buf = vq->data[i]; | 372 | buf = vq->data[i]; |
| 373 | detach_buf(vq, i); | 373 | detach_buf(vq, i); |
| 374 | vq->vring.avail->idx--; | ||
| 374 | END_USE(vq); | 375 | END_USE(vq); |
| 375 | return buf; | 376 | return buf; |
| 376 | } | 377 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index aa309aa93fe8..4cf04e11c66c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp) | |||
| 258 | if (atomic_dec_and_test(&fp->fi_delegees)) { | 258 | if (atomic_dec_and_test(&fp->fi_delegees)) { |
| 259 | vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); | 259 | vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); |
| 260 | fp->fi_lease = NULL; | 260 | fp->fi_lease = NULL; |
| 261 | fput(fp->fi_deleg_file); | ||
| 261 | fp->fi_deleg_file = NULL; | 262 | fp->fi_deleg_file = NULL; |
| 262 | } | 263 | } |
| 263 | } | 264 | } |
| @@ -402,8 +403,8 @@ static void free_generic_stateid(struct nfs4_stateid *stp) | |||
| 402 | if (stp->st_access_bmap) { | 403 | if (stp->st_access_bmap) { |
| 403 | oflag = nfs4_access_bmap_to_omode(stp); | 404 | oflag = nfs4_access_bmap_to_omode(stp); |
| 404 | nfs4_file_put_access(stp->st_file, oflag); | 405 | nfs4_file_put_access(stp->st_file, oflag); |
| 405 | put_nfs4_file(stp->st_file); | ||
| 406 | } | 406 | } |
| 407 | put_nfs4_file(stp->st_file); | ||
| 407 | kmem_cache_free(stateid_slab, stp); | 408 | kmem_cache_free(stateid_slab, stp); |
| 408 | } | 409 | } |
| 409 | 410 | ||
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2e1cebde90df..129f3c9f62d5 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
| @@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
| 1363 | goto out; | 1363 | goto out; |
| 1364 | if (!(iap->ia_valid & ATTR_MODE)) | 1364 | if (!(iap->ia_valid & ATTR_MODE)) |
| 1365 | iap->ia_mode = 0; | 1365 | iap->ia_mode = 0; |
| 1366 | err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); | 1366 | err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC); |
| 1367 | if (err) | 1367 | if (err) |
| 1368 | goto out; | 1368 | goto out; |
| 1369 | 1369 | ||
| @@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
| 1385 | if (IS_ERR(dchild)) | 1385 | if (IS_ERR(dchild)) |
| 1386 | goto out_nfserr; | 1386 | goto out_nfserr; |
| 1387 | 1387 | ||
| 1388 | /* If file doesn't exist, check for permissions to create one */ | ||
| 1389 | if (!dchild->d_inode) { | ||
| 1390 | err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); | ||
| 1391 | if (err) | ||
| 1392 | goto out; | ||
| 1393 | } | ||
| 1394 | |||
| 1388 | err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); | 1395 | err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); |
| 1389 | if (err) | 1396 | if (err) |
| 1390 | goto out; | 1397 | goto out; |
diff --git a/fs/xattr.c b/fs/xattr.c index a19acdb81cd1..f1ef94974dea 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
| @@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz | |||
| 666 | handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); | 666 | handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); |
| 667 | if (!handler) | 667 | if (!handler) |
| 668 | return -EOPNOTSUPP; | 668 | return -EOPNOTSUPP; |
| 669 | return handler->set(dentry, name, value, size, 0, handler->flags); | 669 | return handler->set(dentry, name, value, size, flags, handler->flags); |
| 670 | } | 670 | } |
| 671 | 671 | ||
| 672 | /* | 672 | /* |
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c index 3ca795609113..9f76cceb678d 100644 --- a/fs/xfs/linux-2.6/xfs_message.c +++ b/fs/xfs/linux-2.6/xfs_message.c | |||
| @@ -34,8 +34,10 @@ __xfs_printk( | |||
| 34 | const struct xfs_mount *mp, | 34 | const struct xfs_mount *mp, |
| 35 | struct va_format *vaf) | 35 | struct va_format *vaf) |
| 36 | { | 36 | { |
| 37 | if (mp && mp->m_fsname) | 37 | if (mp && mp->m_fsname) { |
| 38 | printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); | 38 | printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); |
| 39 | return; | ||
| 40 | } | ||
| 39 | printk("%sXFS: %pV\n", level, vaf); | 41 | printk("%sXFS: %pV\n", level, vaf); |
| 40 | } | 42 | } |
| 41 | 43 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cbbfd98ad4a3..2ad95fa1d130 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -388,20 +388,19 @@ struct request_queue | |||
| 388 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ | 388 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
| 389 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ | 389 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
| 390 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 390 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ |
| 391 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ | 391 | #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ |
| 392 | #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ | 392 | #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ |
| 393 | #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ | 393 | #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ |
| 394 | #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ | 394 | #define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */ |
| 395 | #define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ | 395 | #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ |
| 396 | #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ | 396 | #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ |
| 397 | #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ | 397 | #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ |
| 398 | #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ | ||
| 399 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 398 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 400 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 399 | #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ |
| 401 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 400 | #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ |
| 402 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ | 401 | #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ |
| 403 | #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ | 402 | #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ |
| 404 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ | 403 | #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ |
| 405 | 404 | ||
| 406 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 405 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 407 | (1 << QUEUE_FLAG_STACKABLE) | \ | 406 | (1 << QUEUE_FLAG_STACKABLE) | \ |
| @@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q); | |||
| 699 | extern void __blk_stop_queue(struct request_queue *q); | 698 | extern void __blk_stop_queue(struct request_queue *q); |
| 700 | extern void __blk_run_queue(struct request_queue *q); | 699 | extern void __blk_run_queue(struct request_queue *q); |
| 701 | extern void blk_run_queue(struct request_queue *); | 700 | extern void blk_run_queue(struct request_queue *); |
| 701 | extern void blk_run_queue_async(struct request_queue *q); | ||
| 702 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 702 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| 703 | struct rq_map_data *, void __user *, unsigned long, | 703 | struct rq_map_data *, void __user *, unsigned long, |
| 704 | gfp_t); | 704 | gfp_t); |
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 3c7329b8ea0e..0e1855079fbb 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
| @@ -103,8 +103,8 @@ struct driver_info { | |||
| 103 | * Indicates to usbnet, that USB driver accumulates multiple IP packets. | 103 | * Indicates to usbnet, that USB driver accumulates multiple IP packets. |
| 104 | * Affects statistic (counters) and short packet handling. | 104 | * Affects statistic (counters) and short packet handling. |
| 105 | */ | 105 | */ |
| 106 | #define FLAG_MULTI_PACKET 0x1000 | 106 | #define FLAG_MULTI_PACKET 0x2000 |
| 107 | #define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */ | 107 | #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ |
| 108 | 108 | ||
| 109 | /* init device ... can sleep, or cause probe() failure */ | 109 | /* init device ... can sleep, or cause probe() failure */ |
| 110 | int (*bind)(struct usbnet *, struct usb_interface *); | 110 | int (*bind)(struct usbnet *, struct usb_interface *); |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 008ff6c4eecf..f3bc322c5891 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
| @@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb) | |||
| 249 | goto drop; | 249 | goto drop; |
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | /* Zero out the CB buffer if no options present */ | 252 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
| 253 | if (iph->ihl == 5) { | 253 | if (iph->ihl == 5) |
| 254 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | ||
| 255 | return 0; | 254 | return 0; |
| 256 | } | ||
| 257 | 255 | ||
| 258 | opt->optlen = iph->ihl*4 - sizeof(struct iphdr); | 256 | opt->optlen = iph->ihl*4 - sizeof(struct iphdr); |
| 259 | if (ip_options_compile(dev_net(dev), opt, skb)) | 257 | if (ip_options_compile(dev_net(dev), opt, skb)) |
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c index 27dab26ad3b8..054fdb5aeb88 100644 --- a/net/caif/cfdgml.c +++ b/net/caif/cfdgml.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <net/caif/cfsrvl.h> | 13 | #include <net/caif/cfsrvl.h> |
| 14 | #include <net/caif/cfpkt.h> | 14 | #include <net/caif/cfpkt.h> |
| 15 | 15 | ||
| 16 | |||
| 16 | #define container_obj(layr) ((struct cfsrvl *) layr) | 17 | #define container_obj(layr) ((struct cfsrvl *) layr) |
| 17 | 18 | ||
| 18 | #define DGM_CMD_BIT 0x80 | 19 | #define DGM_CMD_BIT 0x80 |
| @@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt) | |||
| 83 | 84 | ||
| 84 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) | 85 | static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) |
| 85 | { | 86 | { |
| 87 | u8 packet_type; | ||
| 86 | u32 zero = 0; | 88 | u32 zero = 0; |
| 87 | struct caif_payload_info *info; | 89 | struct caif_payload_info *info; |
| 88 | struct cfsrvl *service = container_obj(layr); | 90 | struct cfsrvl *service = container_obj(layr); |
| @@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
| 94 | if (cfpkt_getlen(pkt) > DGM_MTU) | 96 | if (cfpkt_getlen(pkt) > DGM_MTU) |
| 95 | return -EMSGSIZE; | 97 | return -EMSGSIZE; |
| 96 | 98 | ||
| 97 | cfpkt_add_head(pkt, &zero, 4); | 99 | cfpkt_add_head(pkt, &zero, 3); |
| 100 | packet_type = 0x08; /* B9 set - UNCLASSIFIED */ | ||
| 101 | cfpkt_add_head(pkt, &packet_type, 1); | ||
| 98 | 102 | ||
| 99 | /* Add info for MUX-layer to route the packet out. */ | 103 | /* Add info for MUX-layer to route the packet out. */ |
| 100 | info = cfpkt_info(pkt); | 104 | info = cfpkt_info(pkt); |
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index 46f34b2e0478..24f1ffa74b06 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c | |||
| @@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, | |||
| 244 | int phyid) | 244 | int phyid) |
| 245 | { | 245 | { |
| 246 | struct cfmuxl *muxl = container_obj(layr); | 246 | struct cfmuxl *muxl = container_obj(layr); |
| 247 | struct list_head *node; | 247 | struct list_head *node, *next; |
| 248 | struct cflayer *layer; | 248 | struct cflayer *layer; |
| 249 | list_for_each(node, &muxl->srvl_list) { | 249 | list_for_each_safe(node, next, &muxl->srvl_list) { |
| 250 | layer = list_entry(node, struct cflayer, node); | 250 | layer = list_entry(node, struct cflayer, node); |
| 251 | if (cfsrvl_phyid_match(layer, phyid)) | 251 | if (cfsrvl_phyid_match(layer, phyid)) |
| 252 | layer->ctrlcmd(layer, ctrl, phyid); | 252 | layer->ctrlcmd(layer, ctrl, phyid); |
diff --git a/net/core/dev.c b/net/core/dev.c index 956d3b006e8b..c2ac599fa0f6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features) | |||
| 5203 | } | 5203 | } |
| 5204 | 5204 | ||
| 5205 | /* TSO requires that SG is present as well. */ | 5205 | /* TSO requires that SG is present as well. */ |
| 5206 | if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { | 5206 | if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { |
| 5207 | netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); | 5207 | netdev_info(dev, "Dropping TSO features since no SG feature.\n"); |
| 5208 | features &= ~NETIF_F_TSO; | 5208 | features &= ~NETIF_F_ALL_TSO; |
| 5209 | } | 5209 | } |
| 5210 | 5210 | ||
| 5211 | /* TSO ECN requires that TSO is present as well. */ | ||
| 5212 | if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) | ||
| 5213 | features &= ~NETIF_F_TSO_ECN; | ||
| 5214 | |||
| 5211 | /* Software GSO depends on SG. */ | 5215 | /* Software GSO depends on SG. */ |
| 5212 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { | 5216 | if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { |
| 5213 | netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); | 5217 | netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); |
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile index ce2d33582859..5761185f884e 100644 --- a/net/ieee802154/Makefile +++ b/net/ieee802154/Makefile | |||
| @@ -1,5 +1,3 @@ | |||
| 1 | obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o | 1 | obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o |
| 2 | ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o | 2 | ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o |
| 3 | af_802154-y := af_ieee802154.o raw.o dgram.o | 3 | af_802154-y := af_ieee802154.o raw.o dgram.o |
| 4 | |||
| 5 | ccflags-y += -Wall -DDEBUG | ||
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 6c0b7f4a3d7d..38f23e721b80 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
| @@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
| 73 | !sk2->sk_bound_dev_if || | 73 | !sk2->sk_bound_dev_if || |
| 74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
| 75 | if (!reuse || !sk2->sk_reuse || | 75 | if (!reuse || !sk2->sk_reuse || |
| 76 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { | 76 | sk2->sk_state == TCP_LISTEN) { |
| 77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); | 77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); |
| 78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || | 78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || |
| 79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) | 79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) |
| @@ -122,8 +122,7 @@ again: | |||
| 122 | (tb->num_owners < smallest_size || smallest_size == -1)) { | 122 | (tb->num_owners < smallest_size || smallest_size == -1)) { |
| 123 | smallest_size = tb->num_owners; | 123 | smallest_size = tb->num_owners; |
| 124 | smallest_rover = rover; | 124 | smallest_rover = rover; |
| 125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && | 125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { |
| 126 | !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { | ||
| 127 | spin_unlock(&head->lock); | 126 | spin_unlock(&head->lock); |
| 128 | snum = smallest_rover; | 127 | snum = smallest_rover; |
| 129 | goto have_snum; | 128 | goto have_snum; |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index dd1b20eca1a2..9df4e635fb5f 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
| @@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head) | |||
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | /* May be called with local BH enabled. */ | 356 | /* May be called with local BH enabled. */ |
| 357 | static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) | 357 | static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, |
| 358 | struct inet_peer __rcu **stack[PEER_MAXDEPTH]) | ||
| 358 | { | 359 | { |
| 359 | int do_free; | 360 | int do_free; |
| 360 | 361 | ||
| @@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) | |||
| 368 | * We use refcnt=-1 to alert lockless readers this entry is deleted. | 369 | * We use refcnt=-1 to alert lockless readers this entry is deleted. |
| 369 | */ | 370 | */ |
| 370 | if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { | 371 | if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { |
| 371 | struct inet_peer __rcu **stack[PEER_MAXDEPTH]; | ||
| 372 | struct inet_peer __rcu ***stackptr, ***delp; | 372 | struct inet_peer __rcu ***stackptr, ***delp; |
| 373 | if (lookup(&p->daddr, stack, base) != p) | 373 | if (lookup(&p->daddr, stack, base) != p) |
| 374 | BUG(); | 374 | BUG(); |
| @@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p) | |||
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | /* May be called with local BH enabled. */ | 424 | /* May be called with local BH enabled. */ |
| 425 | static int cleanup_once(unsigned long ttl) | 425 | static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH]) |
| 426 | { | 426 | { |
| 427 | struct inet_peer *p = NULL; | 427 | struct inet_peer *p = NULL; |
| 428 | 428 | ||
| @@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl) | |||
| 454 | * happen because of entry limits in route cache. */ | 454 | * happen because of entry limits in route cache. */ |
| 455 | return -1; | 455 | return -1; |
| 456 | 456 | ||
| 457 | unlink_from_pool(p, peer_to_base(p)); | 457 | unlink_from_pool(p, peer_to_base(p), stack); |
| 458 | return 0; | 458 | return 0; |
| 459 | } | 459 | } |
| 460 | 460 | ||
| @@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) | |||
| 524 | 524 | ||
| 525 | if (base->total >= inet_peer_threshold) | 525 | if (base->total >= inet_peer_threshold) |
| 526 | /* Remove one less-recently-used entry. */ | 526 | /* Remove one less-recently-used entry. */ |
| 527 | cleanup_once(0); | 527 | cleanup_once(0, stack); |
| 528 | 528 | ||
| 529 | return p; | 529 | return p; |
| 530 | } | 530 | } |
| @@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy) | |||
| 540 | { | 540 | { |
| 541 | unsigned long now = jiffies; | 541 | unsigned long now = jiffies; |
| 542 | int ttl, total; | 542 | int ttl, total; |
| 543 | struct inet_peer __rcu **stack[PEER_MAXDEPTH]; | ||
| 543 | 544 | ||
| 544 | total = compute_total(); | 545 | total = compute_total(); |
| 545 | if (total >= inet_peer_threshold) | 546 | if (total >= inet_peer_threshold) |
| @@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy) | |||
| 548 | ttl = inet_peer_maxttl | 549 | ttl = inet_peer_maxttl |
| 549 | - (inet_peer_maxttl - inet_peer_minttl) / HZ * | 550 | - (inet_peer_maxttl - inet_peer_minttl) / HZ * |
| 550 | total / inet_peer_threshold * HZ; | 551 | total / inet_peer_threshold * HZ; |
| 551 | while (!cleanup_once(ttl)) { | 552 | while (!cleanup_once(ttl, stack)) { |
| 552 | if (jiffies != now) | 553 | if (jiffies != now) |
| 553 | break; | 554 | break; |
| 554 | } | 555 | } |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 28a736f3442f..2391b24e8251 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
| @@ -329,7 +329,7 @@ int ip_options_compile(struct net *net, | |||
| 329 | pp_ptr = optptr + 2; | 329 | pp_ptr = optptr + 2; |
| 330 | goto error; | 330 | goto error; |
| 331 | } | 331 | } |
| 332 | if (skb) { | 332 | if (rt) { |
| 333 | memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); | 333 | memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); |
| 334 | opt->is_changed = 1; | 334 | opt->is_changed = 1; |
| 335 | } | 335 | } |
| @@ -371,7 +371,7 @@ int ip_options_compile(struct net *net, | |||
| 371 | goto error; | 371 | goto error; |
| 372 | } | 372 | } |
| 373 | opt->ts = optptr - iph; | 373 | opt->ts = optptr - iph; |
| 374 | if (skb) { | 374 | if (rt) { |
| 375 | memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); | 375 | memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); |
| 376 | timeptr = (__be32*)&optptr[optptr[2]+3]; | 376 | timeptr = (__be32*)&optptr[optptr[2]+3]; |
| 377 | } | 377 | } |
| @@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
| 603 | unsigned long orefdst; | 603 | unsigned long orefdst; |
| 604 | int err; | 604 | int err; |
| 605 | 605 | ||
| 606 | if (!opt->srr) | 606 | if (!opt->srr || !rt) |
| 607 | return 0; | 607 | return 0; |
| 608 | 608 | ||
| 609 | if (skb->pkt_type != PACKET_HOST) | 609 | if (skb->pkt_type != PACKET_HOST) |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 1a456652086b..321e6e84dbcc 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = { | |||
| 311 | .mode = 0644, | 311 | .mode = 0644, |
| 312 | .proc_handler = proc_do_large_bitmap, | 312 | .proc_handler = proc_do_large_bitmap, |
| 313 | }, | 313 | }, |
| 314 | #ifdef CONFIG_IP_MULTICAST | ||
| 315 | { | 314 | { |
| 316 | .procname = "igmp_max_memberships", | 315 | .procname = "igmp_max_memberships", |
| 317 | .data = &sysctl_igmp_max_memberships, | 316 | .data = &sysctl_igmp_max_memberships, |
| @@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = { | |||
| 319 | .mode = 0644, | 318 | .mode = 0644, |
| 320 | .proc_handler = proc_dointvec | 319 | .proc_handler = proc_dointvec |
| 321 | }, | 320 | }, |
| 322 | |||
| 323 | #endif | ||
| 324 | { | 321 | { |
| 325 | .procname = "igmp_max_msf", | 322 | .procname = "igmp_max_msf", |
| 326 | .data = &sysctl_igmp_max_msf, | 323 | .data = &sysctl_igmp_max_msf, |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 166054650466..f2c5b0fc0f21 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
| @@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
| 44 | !sk2->sk_bound_dev_if || | 44 | !sk2->sk_bound_dev_if || |
| 45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && | 45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && |
| 46 | (!sk->sk_reuse || !sk2->sk_reuse || | 46 | (!sk->sk_reuse || !sk2->sk_reuse || |
| 47 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && | 47 | sk2->sk_state == TCP_LISTEN) && |
| 48 | ipv6_rcv_saddr_equal(sk, sk2)) | 48 | ipv6_rcv_saddr_equal(sk, sk2)) |
| 49 | break; | 49 | break; |
| 50 | } | 50 | } |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index c9890e25cd4c..cc616974a447 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
| 1297 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ | 1297 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ |
| 1298 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | | 1298 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | |
| 1299 | MSG_NOSIGNAL)) { | 1299 | MSG_NOSIGNAL)) { |
| 1300 | err = -EINVAL; | 1300 | return -EINVAL; |
| 1301 | goto out; | ||
| 1302 | } | 1301 | } |
| 1303 | 1302 | ||
| 1304 | lock_sock(sk); | 1303 | lock_sock(sk); |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 058f1e9a9128..903242111317 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
| @@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb) | |||
| 121 | s32 data_size = ntohs(pdulen) - llc_len; | 121 | s32 data_size = ntohs(pdulen) - llc_len; |
| 122 | 122 | ||
| 123 | if (data_size < 0 || | 123 | if (data_size < 0 || |
| 124 | ((skb_tail_pointer(skb) - | 124 | !pskb_may_pull(skb, data_size)) |
| 125 | (u8 *)pdu) - llc_len) < data_size) | ||
| 126 | return 0; | 125 | return 0; |
| 127 | if (unlikely(pskb_trim_rcsum(skb, data_size))) | 126 | if (unlikely(pskb_trim_rcsum(skb, data_size))) |
| 128 | return 0; | 127 | return 0; |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 00a33242e90c..a274300b6a56 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
| @@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, | |||
| 343 | ipset_adtfn adtfn = set->variant->adt[adt]; | 343 | ipset_adtfn adtfn = set->variant->adt[adt]; |
| 344 | struct ipmac data; | 344 | struct ipmac data; |
| 345 | 345 | ||
| 346 | /* MAC can be src only */ | ||
| 347 | if (!(flags & IPSET_DIM_TWO_SRC)) | ||
| 348 | return 0; | ||
| 349 | |||
| 346 | data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC)); | 350 | data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC)); |
| 347 | if (data.id < map->first_ip || data.id > map->last_ip) | 351 | if (data.id < map->first_ip || data.id > map->last_ip) |
| 348 | return -IPSET_ERR_BITMAP_RANGE; | 352 | return -IPSET_ERR_BITMAP_RANGE; |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 9152e69a162d..72d1ac611fdc 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1022 | if (cb->args[1] >= ip_set_max) | 1022 | if (cb->args[1] >= ip_set_max) |
| 1023 | goto out; | 1023 | goto out; |
| 1024 | 1024 | ||
| 1025 | pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]); | ||
| 1026 | max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max; | 1025 | max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max; |
| 1026 | dump_last: | ||
| 1027 | pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]); | ||
| 1027 | for (; cb->args[1] < max; cb->args[1]++) { | 1028 | for (; cb->args[1] < max; cb->args[1]++) { |
| 1028 | index = (ip_set_id_t) cb->args[1]; | 1029 | index = (ip_set_id_t) cb->args[1]; |
| 1029 | set = ip_set_list[index]; | 1030 | set = ip_set_list[index]; |
| @@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1038 | * so that lists (unions of sets) are dumped last. | 1039 | * so that lists (unions of sets) are dumped last. |
| 1039 | */ | 1040 | */ |
| 1040 | if (cb->args[0] != DUMP_ONE && | 1041 | if (cb->args[0] != DUMP_ONE && |
| 1041 | !((cb->args[0] == DUMP_ALL) ^ | 1042 | ((cb->args[0] == DUMP_ALL) == |
| 1042 | (set->type->features & IPSET_DUMP_LAST))) | 1043 | !!(set->type->features & IPSET_DUMP_LAST))) |
| 1043 | continue; | 1044 | continue; |
| 1044 | pr_debug("List set: %s\n", set->name); | 1045 | pr_debug("List set: %s\n", set->name); |
| 1045 | if (!cb->args[2]) { | 1046 | if (!cb->args[2]) { |
| @@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1083 | goto release_refcount; | 1084 | goto release_refcount; |
| 1084 | } | 1085 | } |
| 1085 | } | 1086 | } |
| 1087 | /* If we dump all sets, continue with dumping last ones */ | ||
| 1088 | if (cb->args[0] == DUMP_ALL) { | ||
| 1089 | cb->args[0] = DUMP_LAST; | ||
| 1090 | cb->args[1] = 0; | ||
| 1091 | goto dump_last; | ||
| 1092 | } | ||
| 1086 | goto out; | 1093 | goto out; |
| 1087 | 1094 | ||
| 1088 | nla_put_failure: | 1095 | nla_put_failure: |
| @@ -1093,11 +1100,6 @@ release_refcount: | |||
| 1093 | pr_debug("release set %s\n", ip_set_list[index]->name); | 1100 | pr_debug("release set %s\n", ip_set_list[index]->name); |
| 1094 | ip_set_put_byindex(index); | 1101 | ip_set_put_byindex(index); |
| 1095 | } | 1102 | } |
| 1096 | |||
| 1097 | /* If we dump all sets, continue with dumping last ones */ | ||
| 1098 | if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2]) | ||
| 1099 | cb->args[0] = DUMP_LAST; | ||
| 1100 | |||
| 1101 | out: | 1103 | out: |
| 1102 | if (nlh) { | 1104 | if (nlh) { |
| 1103 | nlmsg_end(skb, nlh); | 1105 | nlmsg_end(skb, nlh); |
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 061d48cec137..b3babaed7719 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c | |||
| @@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par) | |||
| 81 | if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { | 81 | if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { |
| 82 | pr_warning("Protocol error: set match dimension " | 82 | pr_warning("Protocol error: set match dimension " |
| 83 | "is over the limit!\n"); | 83 | "is over the limit!\n"); |
| 84 | ip_set_nfnl_put(info->match_set.index); | ||
| 84 | return -ERANGE; | 85 | return -ERANGE; |
| 85 | } | 86 | } |
| 86 | 87 | ||
| @@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
| 135 | if (index == IPSET_INVALID_ID) { | 136 | if (index == IPSET_INVALID_ID) { |
| 136 | pr_warning("Cannot find del_set index %u as target\n", | 137 | pr_warning("Cannot find del_set index %u as target\n", |
| 137 | info->del_set.index); | 138 | info->del_set.index); |
| 139 | if (info->add_set.index != IPSET_INVALID_ID) | ||
| 140 | ip_set_nfnl_put(info->add_set.index); | ||
| 138 | return -ENOENT; | 141 | return -ENOENT; |
| 139 | } | 142 | } |
| 140 | } | 143 | } |
| @@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) | |||
| 142 | info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { | 145 | info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { |
| 143 | pr_warning("Protocol error: SET target dimension " | 146 | pr_warning("Protocol error: SET target dimension " |
| 144 | "is over the limit!\n"); | 147 | "is over the limit!\n"); |
| 148 | if (info->add_set.index != IPSET_INVALID_ID) | ||
| 149 | ip_set_nfnl_put(info->add_set.index); | ||
| 150 | if (info->del_set.index != IPSET_INVALID_ID) | ||
| 151 | ip_set_nfnl_put(info->del_set.index); | ||
| 145 | return -ERANGE; | 152 | return -ERANGE; |
| 146 | } | 153 | } |
| 147 | 154 | ||
| @@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par) | |||
| 192 | if (info->match_set.dim > IPSET_DIM_MAX) { | 199 | if (info->match_set.dim > IPSET_DIM_MAX) { |
| 193 | pr_warning("Protocol error: set match dimension " | 200 | pr_warning("Protocol error: set match dimension " |
| 194 | "is over the limit!\n"); | 201 | "is over the limit!\n"); |
| 202 | ip_set_nfnl_put(info->match_set.index); | ||
| 195 | return -ERANGE; | 203 | return -ERANGE; |
| 196 | } | 204 | } |
| 197 | 205 | ||
| @@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par) | |||
| 219 | if (info->del_set.index != IPSET_INVALID_ID) | 227 | if (info->del_set.index != IPSET_INVALID_ID) |
| 220 | ip_set_del(info->del_set.index, | 228 | ip_set_del(info->del_set.index, |
| 221 | skb, par->family, | 229 | skb, par->family, |
| 222 | info->add_set.dim, | 230 | info->del_set.dim, |
| 223 | info->del_set.flags); | 231 | info->del_set.flags); |
| 224 | 232 | ||
| 225 | return XT_CONTINUE; | 233 | return XT_CONTINUE; |
| @@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par) | |||
| 245 | if (index == IPSET_INVALID_ID) { | 253 | if (index == IPSET_INVALID_ID) { |
| 246 | pr_warning("Cannot find del_set index %u as target\n", | 254 | pr_warning("Cannot find del_set index %u as target\n", |
| 247 | info->del_set.index); | 255 | info->del_set.index); |
| 256 | if (info->add_set.index != IPSET_INVALID_ID) | ||
| 257 | ip_set_nfnl_put(info->add_set.index); | ||
| 248 | return -ENOENT; | 258 | return -ENOENT; |
| 249 | } | 259 | } |
| 250 | } | 260 | } |
| 251 | if (info->add_set.dim > IPSET_DIM_MAX || | 261 | if (info->add_set.dim > IPSET_DIM_MAX || |
| 252 | info->del_set.flags > IPSET_DIM_MAX) { | 262 | info->del_set.dim > IPSET_DIM_MAX) { |
| 253 | pr_warning("Protocol error: SET target dimension " | 263 | pr_warning("Protocol error: SET target dimension " |
| 254 | "is over the limit!\n"); | 264 | "is over the limit!\n"); |
| 265 | if (info->add_set.index != IPSET_INVALID_ID) | ||
| 266 | ip_set_nfnl_put(info->add_set.index); | ||
| 267 | if (info->del_set.index != IPSET_INVALID_ID) | ||
| 268 | ip_set_nfnl_put(info->del_set.index); | ||
| 255 | return -ERANGE; | 269 | return -ERANGE; |
| 256 | } | 270 | } |
| 257 | 271 | ||
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 0698cad61763..1a21c571aa03 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, | |||
| 569 | sctp_assoc_set_primary(asoc, transport); | 569 | sctp_assoc_set_primary(asoc, transport); |
| 570 | if (asoc->peer.active_path == peer) | 570 | if (asoc->peer.active_path == peer) |
| 571 | asoc->peer.active_path = transport; | 571 | asoc->peer.active_path = transport; |
| 572 | if (asoc->peer.retran_path == peer) | ||
| 573 | asoc->peer.retran_path = transport; | ||
| 572 | if (asoc->peer.last_data_from == peer) | 574 | if (asoc->peer.last_data_from == peer) |
| 573 | asoc->peer.last_data_from = transport; | 575 | asoc->peer.last_data_from = transport; |
| 574 | 576 | ||
| @@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) | |||
| 1323 | 1325 | ||
| 1324 | if (t) | 1326 | if (t) |
| 1325 | asoc->peer.retran_path = t; | 1327 | asoc->peer.retran_path = t; |
| 1328 | else | ||
| 1329 | t = asoc->peer.retran_path; | ||
| 1326 | 1330 | ||
| 1327 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" | 1331 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" |
| 1328 | " %p addr: ", | 1332 | " %p addr: ", |
