diff options
40 files changed, 1031 insertions, 417 deletions
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt new file mode 100644 index 000000000000..3ea460583111 --- /dev/null +++ b/Documentation/devicetree/bindings/video/simple-framebuffer.txt | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | Simple Framebuffer | ||
| 2 | |||
| 3 | A simple frame-buffer describes a raw memory region that may be rendered to, | ||
| 4 | with the assumption that the display hardware has already been set up to scan | ||
| 5 | out from that buffer. | ||
| 6 | |||
| 7 | Required properties: | ||
| 8 | - compatible: "simple-framebuffer" | ||
| 9 | - reg: Should contain the location and size of the framebuffer memory. | ||
| 10 | - width: The width of the framebuffer in pixels. | ||
| 11 | - height: The height of the framebuffer in pixels. | ||
| 12 | - stride: The number of bytes in each line of the framebuffer. | ||
| 13 | - format: The format of the framebuffer surface. Valid values are: | ||
| 14 | - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b). | ||
| 15 | |||
| 16 | Example: | ||
| 17 | |||
| 18 | framebuffer { | ||
| 19 | compatible = "simple-framebuffer"; | ||
| 20 | reg = <0x1d385000 (1600 * 1200 * 2)>; | ||
| 21 | width = <1600>; | ||
| 22 | height = <1200>; | ||
| 23 | stride = <(1600 * 2)>; | ||
| 24 | format = "r5g6b5"; | ||
| 25 | }; | ||
diff --git a/Documentation/rapidio/rapidio.txt b/Documentation/rapidio/rapidio.txt index c75694b35d08..a9c16c979da2 100644 --- a/Documentation/rapidio/rapidio.txt +++ b/Documentation/rapidio/rapidio.txt | |||
| @@ -79,20 +79,63 @@ master port that is used to communicate with devices within the network. | |||
| 79 | In order to initialize the RapidIO subsystem, a platform must initialize and | 79 | In order to initialize the RapidIO subsystem, a platform must initialize and |
| 80 | register at least one master port within the RapidIO network. To register mport | 80 | register at least one master port within the RapidIO network. To register mport |
| 81 | within the subsystem controller driver initialization code calls function | 81 | within the subsystem controller driver initialization code calls function |
| 82 | rio_register_mport() for each available master port. After all active master | 82 | rio_register_mport() for each available master port. |
| 83 | ports are registered with a RapidIO subsystem, the rio_init_mports() routine | ||
| 84 | is called to perform enumeration and discovery. | ||
| 85 | 83 | ||
| 86 | In the current PowerPC-based implementation a subsys_initcall() is specified to | 84 | RapidIO subsystem uses subsys_initcall() or device_initcall() to perform |
| 87 | perform controller initialization and mport registration. At the end it directly | 85 | controller initialization (depending on controller device type). |
| 88 | calls rio_init_mports() to execute RapidIO enumeration and discovery. | 86 | |
| 87 | After all active master ports are registered with a RapidIO subsystem, | ||
| 88 | an enumeration and/or discovery routine may be called automatically or | ||
| 89 | by user-space command. | ||
| 89 | 90 | ||
| 90 | 4. Enumeration and Discovery | 91 | 4. Enumeration and Discovery |
| 91 | ---------------------------- | 92 | ---------------------------- |
| 92 | 93 | ||
| 93 | When rio_init_mports() is called it scans a list of registered master ports and | 94 | 4.1 Overview |
| 94 | calls an enumeration or discovery routine depending on the configured role of a | 95 | ------------ |
| 95 | master port: host or agent. | 96 | |
| 97 | RapidIO subsystem configuration options allow users to specify enumeration and | ||
| 98 | discovery methods as statically linked components or loadable modules. | ||
| 99 | An enumeration/discovery method implementation and available input parameters | ||
| 100 | define how any given method can be attached to available RapidIO mports: | ||
| 101 | simply to all available mports OR individually to the specified mport device. | ||
| 102 | |||
| 103 | Depending on selected enumeration/discovery build configuration, there are | ||
| 104 | several methods to initiate an enumeration and/or discovery process: | ||
| 105 | |||
| 106 | (a) Statically linked enumeration and discovery process can be started | ||
| 107 | automatically during kernel initialization time using corresponding module | ||
| 108 | parameters. This was the original method used since introduction of RapidIO | ||
| 109 | subsystem. Now this method relies on enumerator module parameter which is | ||
| 110 | 'rio-scan.scan' for existing basic enumeration/discovery method. | ||
| 111 | When automatic start of enumeration/discovery is used a user has to ensure | ||
| 112 | that all discovering endpoints are started before the enumerating endpoint | ||
| 113 | and are waiting for enumeration to be completed. | ||
| 114 | Configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines time that discovering | ||
| 115 | endpoint waits for enumeration to be completed. If the specified timeout | ||
| 116 | expires the discovery process is terminated without obtaining RapidIO network | ||
| 117 | information. NOTE: a timed out discovery process may be restarted later using | ||
| 118 | a user-space command as it is described later if the given endpoint was | ||
| 119 | enumerated successfully. | ||
| 120 | |||
| 121 | (b) Statically linked enumeration and discovery process can be started by | ||
| 122 | a command from user space. This initiation method provides more flexibility | ||
| 123 | for a system startup compared to the option (a) above. After all participating | ||
| 124 | endpoints have been successfully booted, an enumeration process shall be | ||
| 125 | started first by issuing a user-space command, after an enumeration is | ||
| 126 | completed a discovery process can be started on all remaining endpoints. | ||
| 127 | |||
| 128 | (c) Modular enumeration and discovery process can be started by a command from | ||
| 129 | user space. After an enumeration/discovery module is loaded, a network scan | ||
| 130 | process can be started by issuing a user-space command. | ||
| 131 | Similar to the option (b) above, an enumerator has to be started first. | ||
| 132 | |||
| 133 | (d) Modular enumeration and discovery process can be started by a module | ||
| 134 | initialization routine. In this case an enumerating module shall be loaded | ||
| 135 | first. | ||
| 136 | |||
| 137 | When a network scan process is started it calls an enumeration or discovery | ||
| 138 | routine depending on the configured role of a master port: host or agent. | ||
| 96 | 139 | ||
| 97 | Enumeration is performed by a master port if it is configured as a host port by | 140 | Enumeration is performed by a master port if it is configured as a host port by |
| 98 | assigning a host device ID greater than or equal to zero. A host device ID is | 141 | assigning a host device ID greater than or equal to zero. A host device ID is |
| @@ -104,8 +147,58 @@ for it. | |||
| 104 | The enumeration and discovery routines use RapidIO maintenance transactions | 147 | The enumeration and discovery routines use RapidIO maintenance transactions |
| 105 | to access the configuration space of devices. | 148 | to access the configuration space of devices. |
| 106 | 149 | ||
| 107 | The enumeration process is implemented according to the enumeration algorithm | 150 | 4.2 Automatic Start of Enumeration and Discovery |
| 108 | outlined in the RapidIO Interconnect Specification: Annex I [1]. | 151 | ------------------------------------------------ |
| 152 | |||
| 153 | Automatic enumeration/discovery start method is applicable only to built-in | ||
| 154 | enumeration/discovery RapidIO configuration selection. To enable automatic | ||
| 155 | enumeration/discovery start by existing basic enumerator method set use boot | ||
| 156 | command line parameter "rio-scan.scan=1". | ||
| 157 | |||
| 158 | This configuration requires synchronized start of all RapidIO endpoints that | ||
| 159 | form a network which will be enumerated/discovered. Discovering endpoints have | ||
| 160 | to be started before an enumeration starts to ensure that all RapidIO | ||
| 161 | controllers have been initialized and are ready to be discovered. Configuration | ||
| 162 | parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines time (in seconds) which | ||
| 163 | a discovering endpoint will wait for enumeration to be completed. | ||
| 164 | |||
| 165 | When automatic enumeration/discovery start is selected, basic method's | ||
| 166 | initialization routine calls rio_init_mports() to perform enumeration or | ||
| 167 | discovery for all known mport devices. | ||
| 168 | |||
| 169 | Depending on RapidIO network size and configuration this automatic | ||
| 170 | enumeration/discovery start method may be difficult to use due to the | ||
| 171 | requirement for synchronized start of all endpoints. | ||
| 172 | |||
| 173 | 4.3 User-space Start of Enumeration and Discovery | ||
| 174 | ------------------------------------------------- | ||
| 175 | |||
| 176 | User-space start of enumeration and discovery can be used with built-in and | ||
| 177 | modular build configurations. For user-space controlled start RapidIO subsystem | ||
| 178 | creates the sysfs write-only attribute file '/sys/bus/rapidio/scan'. To initiate | ||
| 179 | an enumeration or discovery process on specific mport device, a user needs to | ||
| 180 | write mport_ID (not RapidIO destination ID) into that file. The mport_ID is a | ||
| 181 | sequential number (0 ... RIO_MAX_MPORTS) assigned during mport device | ||
| 182 | registration. For example for machine with single RapidIO controller, mport_ID | ||
| 183 | for that controller will always be 0. | ||
| 184 | |||
| 185 | To initiate RapidIO enumeration/discovery on all available mports a user may | ||
| 186 | write '-1' (or RIO_MPORT_ANY) into the scan attribute file. | ||
| 187 | |||
| 188 | 4.4 Basic Enumeration Method | ||
| 189 | ---------------------------- | ||
| 190 | |||
| 191 | This is an original enumeration/discovery method which is available since | ||
| 192 | first release of RapidIO subsystem code. The enumeration process is | ||
| 193 | implemented according to the enumeration algorithm outlined in the RapidIO | ||
| 194 | Interconnect Specification: Annex I [1]. | ||
| 195 | |||
| 196 | This method can be configured as statically linked or loadable module. | ||
| 197 | The method's single parameter "scan" allows to trigger the enumeration/discovery | ||
| 198 | process from module initialization routine. | ||
| 199 | |||
| 200 | This enumeration/discovery method can be started only once and does not support | ||
| 201 | unloading if it is built as a module. | ||
| 109 | 202 | ||
| 110 | The enumeration process traverses the network using a recursive depth-first | 203 | The enumeration process traverses the network using a recursive depth-first |
| 111 | algorithm. When a new device is found, the enumerator takes ownership of that | 204 | algorithm. When a new device is found, the enumerator takes ownership of that |
| @@ -160,6 +253,19 @@ time period. If this wait time period expires before enumeration is completed, | |||
| 160 | an agent skips RapidIO discovery and continues with remaining kernel | 253 | an agent skips RapidIO discovery and continues with remaining kernel |
| 161 | initialization. | 254 | initialization. |
| 162 | 255 | ||
| 256 | 4.5 Adding New Enumeration/Discovery Method | ||
| 257 | ------------------------------------------- | ||
| 258 | |||
| 259 | RapidIO subsystem code organization allows addition of new enumeration/discovery | ||
| 260 | methods as new configuration options without significant impact to the core | ||
| 261 | RapidIO code. | ||
| 262 | |||
| 263 | A new enumeration/discovery method has to be attached to one or more mport | ||
| 264 | devices before an enumeration/discovery process can be started. Normally, | ||
| 265 | method's module initialization routine calls rio_register_scan() to attach | ||
| 266 | an enumerator to a specified mport device (or devices). The basic enumerator | ||
| 267 | implementation demonstrates this process. | ||
| 268 | |||
| 163 | 5. References | 269 | 5. References |
| 164 | ------------- | 270 | ------------- |
| 165 | 271 | ||
diff --git a/Documentation/rapidio/sysfs.txt b/Documentation/rapidio/sysfs.txt index 97f71ce575d6..19878179da4c 100644 --- a/Documentation/rapidio/sysfs.txt +++ b/Documentation/rapidio/sysfs.txt | |||
| @@ -88,3 +88,20 @@ that exports additional attributes. | |||
| 88 | 88 | ||
| 89 | IDT_GEN2: | 89 | IDT_GEN2: |
| 90 | errlog - reads contents of device error log until it is empty. | 90 | errlog - reads contents of device error log until it is empty. |
| 91 | |||
| 92 | |||
| 93 | 5. RapidIO Bus Attributes | ||
| 94 | ------------------------- | ||
| 95 | |||
| 96 | RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific | ||
| 97 | attribute: | ||
| 98 | |||
| 99 | scan - allows triggering the enumeration/discovery process from user space. This | ||
| 100 | is a write-only attribute. To initiate an enumeration or discovery | ||
| 101 | process on specific mport device, a user needs to write mport_ID (not | ||
| 102 | RapidIO destination ID) into this file. The mport_ID is a sequential | ||
| 103 | number (0 ... RIO_MAX_MPORTS) assigned to the mport device. | ||
| 104 | For example, for a machine with a single RapidIO controller, mport_ID | ||
| 105 | for that controller will always be 0. | ||
| 106 | To initiate RapidIO enumeration/discovery on all available mports | ||
| 107 | a user must write '-1' (or RIO_MPORT_ANY) into this attribute file. | ||
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index f1a29f8e9d33..9bf4371755f2 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
| @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) | |||
| 117 | 117 | ||
| 118 | spin_lock(&brd->brd_lock); | 118 | spin_lock(&brd->brd_lock); |
| 119 | idx = sector >> PAGE_SECTORS_SHIFT; | 119 | idx = sector >> PAGE_SECTORS_SHIFT; |
| 120 | page->index = idx; | ||
| 120 | if (radix_tree_insert(&brd->brd_pages, idx, page)) { | 121 | if (radix_tree_insert(&brd->brd_pages, idx, page)) { |
| 121 | __free_page(page); | 122 | __free_page(page); |
| 122 | page = radix_tree_lookup(&brd->brd_pages, idx); | 123 | page = radix_tree_lookup(&brd->brd_pages, idx); |
| 123 | BUG_ON(!page); | 124 | BUG_ON(!page); |
| 124 | BUG_ON(page->index != idx); | 125 | BUG_ON(page->index != idx); |
| 125 | } else | 126 | } |
| 126 | page->index = idx; | ||
| 127 | spin_unlock(&brd->brd_lock); | 127 | spin_unlock(&brd->brd_lock); |
| 128 | 128 | ||
| 129 | radix_tree_preload_end(); | 129 | radix_tree_preload_end(); |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index f8ef15f37c5e..3fd130fdfbc1 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
| @@ -1160,8 +1160,7 @@ static int ace_probe(struct platform_device *dev) | |||
| 1160 | dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); | 1160 | dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); |
| 1161 | 1161 | ||
| 1162 | /* device id and bus width */ | 1162 | /* device id and bus width */ |
| 1163 | of_property_read_u32(dev->dev.of_node, "port-number", &id); | 1163 | if (of_property_read_u32(dev->dev.of_node, "port-number", &id)) |
| 1164 | if (id < 0) | ||
| 1165 | id = 0; | 1164 | id = 0; |
| 1166 | if (of_find_property(dev->dev.of_node, "8-bit", NULL)) | 1165 | if (of_find_property(dev->dev.of_node, "8-bit", NULL)) |
| 1167 | bus_width = ACE_BUS_WIDTH_8; | 1166 | bus_width = ACE_BUS_WIDTH_8; |
diff --git a/drivers/char/random.c b/drivers/char/random.c index cd9a6211dcad..35487e8ded59 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
| 865 | if (r->entropy_count / 8 < min + reserved) { | 865 | if (r->entropy_count / 8 < min + reserved) { |
| 866 | nbytes = 0; | 866 | nbytes = 0; |
| 867 | } else { | 867 | } else { |
| 868 | int entropy_count, orig; | ||
| 869 | retry: | ||
| 870 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | ||
| 868 | /* If limited, never pull more than available */ | 871 | /* If limited, never pull more than available */ |
| 869 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) | 872 | if (r->limit && nbytes + reserved >= entropy_count / 8) |
| 870 | nbytes = r->entropy_count/8 - reserved; | 873 | nbytes = entropy_count/8 - reserved; |
| 871 | 874 | ||
| 872 | if (r->entropy_count / 8 >= nbytes + reserved) | 875 | if (entropy_count / 8 >= nbytes + reserved) { |
| 873 | r->entropy_count -= nbytes*8; | 876 | entropy_count -= nbytes*8; |
| 874 | else | 877 | if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) |
| 875 | r->entropy_count = reserved; | 878 | goto retry; |
| 879 | } else { | ||
| 880 | entropy_count = reserved; | ||
| 881 | if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) | ||
| 882 | goto retry; | ||
| 883 | } | ||
| 876 | 884 | ||
| 877 | if (r->entropy_count < random_write_wakeup_thresh) | 885 | if (entropy_count < random_write_wakeup_thresh) |
| 878 | wakeup_write = 1; | 886 | wakeup_write = 1; |
| 879 | } | 887 | } |
| 880 | 888 | ||
| @@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
| 957 | { | 965 | { |
| 958 | ssize_t ret = 0, i; | 966 | ssize_t ret = 0, i; |
| 959 | __u8 tmp[EXTRACT_SIZE]; | 967 | __u8 tmp[EXTRACT_SIZE]; |
| 968 | unsigned long flags; | ||
| 960 | 969 | ||
| 961 | /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ | 970 | /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */ |
| 962 | if (fips_enabled && !r->last_data_init) | 971 | if (fips_enabled) { |
| 963 | nbytes += EXTRACT_SIZE; | 972 | spin_lock_irqsave(&r->lock, flags); |
| 973 | if (!r->last_data_init) { | ||
| 974 | r->last_data_init = true; | ||
| 975 | spin_unlock_irqrestore(&r->lock, flags); | ||
| 976 | trace_extract_entropy(r->name, EXTRACT_SIZE, | ||
| 977 | r->entropy_count, _RET_IP_); | ||
| 978 | xfer_secondary_pool(r, EXTRACT_SIZE); | ||
| 979 | extract_buf(r, tmp); | ||
| 980 | spin_lock_irqsave(&r->lock, flags); | ||
| 981 | memcpy(r->last_data, tmp, EXTRACT_SIZE); | ||
| 982 | } | ||
| 983 | spin_unlock_irqrestore(&r->lock, flags); | ||
| 984 | } | ||
| 964 | 985 | ||
| 965 | trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); | 986 | trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); |
| 966 | xfer_secondary_pool(r, nbytes); | 987 | xfer_secondary_pool(r, nbytes); |
| @@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, | |||
| 970 | extract_buf(r, tmp); | 991 | extract_buf(r, tmp); |
| 971 | 992 | ||
| 972 | if (fips_enabled) { | 993 | if (fips_enabled) { |
| 973 | unsigned long flags; | ||
| 974 | |||
| 975 | |||
| 976 | /* prime last_data value if need be, per fips 140-2 */ | ||
| 977 | if (!r->last_data_init) { | ||
| 978 | spin_lock_irqsave(&r->lock, flags); | ||
| 979 | memcpy(r->last_data, tmp, EXTRACT_SIZE); | ||
| 980 | r->last_data_init = true; | ||
| 981 | nbytes -= EXTRACT_SIZE; | ||
| 982 | spin_unlock_irqrestore(&r->lock, flags); | ||
| 983 | extract_buf(r, tmp); | ||
| 984 | } | ||
| 985 | |||
| 986 | spin_lock_irqsave(&r->lock, flags); | 994 | spin_lock_irqsave(&r->lock, flags); |
| 987 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) | 995 | if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) |
| 988 | panic("Hardware RNG duplicated output!\n"); | 996 | panic("Hardware RNG duplicated output!\n"); |
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c index ee14662ed5ce..98cae529373f 100644 --- a/drivers/leds/leds-ot200.c +++ b/drivers/leds/leds-ot200.c | |||
| @@ -47,37 +47,37 @@ static struct ot200_led leds[] = { | |||
| 47 | { | 47 | { |
| 48 | .name = "led_1", | 48 | .name = "led_1", |
| 49 | .port = 0x49, | 49 | .port = 0x49, |
| 50 | .mask = BIT(7), | 50 | .mask = BIT(6), |
| 51 | }, | 51 | }, |
| 52 | { | 52 | { |
| 53 | .name = "led_2", | 53 | .name = "led_2", |
| 54 | .port = 0x49, | 54 | .port = 0x49, |
| 55 | .mask = BIT(6), | 55 | .mask = BIT(5), |
| 56 | }, | 56 | }, |
| 57 | { | 57 | { |
| 58 | .name = "led_3", | 58 | .name = "led_3", |
| 59 | .port = 0x49, | 59 | .port = 0x49, |
| 60 | .mask = BIT(5), | 60 | .mask = BIT(4), |
| 61 | }, | 61 | }, |
| 62 | { | 62 | { |
| 63 | .name = "led_4", | 63 | .name = "led_4", |
| 64 | .port = 0x49, | 64 | .port = 0x49, |
| 65 | .mask = BIT(4), | 65 | .mask = BIT(3), |
| 66 | }, | 66 | }, |
| 67 | { | 67 | { |
| 68 | .name = "led_5", | 68 | .name = "led_5", |
| 69 | .port = 0x49, | 69 | .port = 0x49, |
| 70 | .mask = BIT(3), | 70 | .mask = BIT(2), |
| 71 | }, | 71 | }, |
| 72 | { | 72 | { |
| 73 | .name = "led_6", | 73 | .name = "led_6", |
| 74 | .port = 0x49, | 74 | .port = 0x49, |
| 75 | .mask = BIT(2), | 75 | .mask = BIT(1), |
| 76 | }, | 76 | }, |
| 77 | { | 77 | { |
| 78 | .name = "led_7", | 78 | .name = "led_7", |
| 79 | .port = 0x49, | 79 | .port = 0x49, |
| 80 | .mask = BIT(1), | 80 | .mask = BIT(0), |
| 81 | } | 81 | } |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 6194d35ebb97..5ab056494bbe 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig | |||
| @@ -47,4 +47,24 @@ config RAPIDIO_DEBUG | |||
| 47 | 47 | ||
| 48 | If you are unsure about this, say N here. | 48 | If you are unsure about this, say N here. |
| 49 | 49 | ||
| 50 | choice | ||
| 51 | prompt "Enumeration method" | ||
| 52 | depends on RAPIDIO | ||
| 53 | default RAPIDIO_ENUM_BASIC | ||
| 54 | help | ||
| 55 | There are different enumeration and discovery mechanisms offered | ||
| 56 | for RapidIO subsystem. You may select single built-in method or | ||
| 57 | any number of methods to be built as modules. | ||
| 58 | Selecting a built-in method disables use of loadable methods. | ||
| 59 | |||
| 60 | If unsure, select Basic built-in. | ||
| 61 | |||
| 62 | config RAPIDIO_ENUM_BASIC | ||
| 63 | tristate "Basic" | ||
| 64 | help | ||
| 65 | This option includes basic RapidIO fabric enumeration and discovery | ||
| 66 | mechanism similar to one described in RapidIO specification Annex 1. | ||
| 67 | |||
| 68 | endchoice | ||
| 69 | |||
| 50 | source "drivers/rapidio/switches/Kconfig" | 70 | source "drivers/rapidio/switches/Kconfig" |
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index ec3fb8121004..3036702ffe8b 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for RapidIO interconnect services | 2 | # Makefile for RapidIO interconnect services |
| 3 | # | 3 | # |
| 4 | obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o | 4 | obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o |
| 5 | obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o | ||
| 5 | 6 | ||
| 6 | obj-$(CONFIG_RAPIDIO) += switches/ | 7 | obj-$(CONFIG_RAPIDIO) += switches/ |
| 7 | obj-$(CONFIG_RAPIDIO) += devices/ | 8 | obj-$(CONFIG_RAPIDIO) += devices/ |
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 6faba406b6e9..a8b2c23a7ef4 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
| @@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) | |||
| 471 | u32 intval; | 471 | u32 intval; |
| 472 | u32 ch_inte; | 472 | u32 ch_inte; |
| 473 | 473 | ||
| 474 | /* For MSI mode disable all device-level interrupts */ | ||
| 475 | if (priv->flags & TSI721_USING_MSI) | ||
| 476 | iowrite32(0, priv->regs + TSI721_DEV_INTE); | ||
| 477 | |||
| 474 | dev_int = ioread32(priv->regs + TSI721_DEV_INT); | 478 | dev_int = ioread32(priv->regs + TSI721_DEV_INT); |
| 475 | if (!dev_int) | 479 | if (!dev_int) |
| 476 | return IRQ_NONE; | 480 | return IRQ_NONE; |
| @@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) | |||
| 560 | } | 564 | } |
| 561 | } | 565 | } |
| 562 | #endif | 566 | #endif |
| 567 | |||
| 568 | /* For MSI mode re-enable device-level interrupts */ | ||
| 569 | if (priv->flags & TSI721_USING_MSI) { | ||
| 570 | dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | | ||
| 571 | TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; | ||
| 572 | iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); | ||
| 573 | } | ||
| 574 | |||
| 563 | return IRQ_HANDLED; | 575 | return IRQ_HANDLED; |
| 564 | } | 576 | } |
| 565 | 577 | ||
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 0f4a53bdaa3c..a0c875563d76 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
| @@ -164,6 +164,13 @@ void rio_unregister_driver(struct rio_driver *rdrv) | |||
| 164 | driver_unregister(&rdrv->driver); | 164 | driver_unregister(&rdrv->driver); |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | void rio_attach_device(struct rio_dev *rdev) | ||
| 168 | { | ||
| 169 | rdev->dev.bus = &rio_bus_type; | ||
| 170 | rdev->dev.parent = &rio_bus; | ||
| 171 | } | ||
| 172 | EXPORT_SYMBOL_GPL(rio_attach_device); | ||
| 173 | |||
| 167 | /** | 174 | /** |
| 168 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure | 175 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure |
| 169 | * @dev: the standard device structure to match against | 176 | * @dev: the standard device structure to match against |
| @@ -200,6 +207,7 @@ struct bus_type rio_bus_type = { | |||
| 200 | .name = "rapidio", | 207 | .name = "rapidio", |
| 201 | .match = rio_match_bus, | 208 | .match = rio_match_bus, |
| 202 | .dev_attrs = rio_dev_attrs, | 209 | .dev_attrs = rio_dev_attrs, |
| 210 | .bus_attrs = rio_bus_attrs, | ||
| 203 | .probe = rio_device_probe, | 211 | .probe = rio_device_probe, |
| 204 | .remove = rio_device_remove, | 212 | .remove = rio_device_remove, |
| 205 | }; | 213 | }; |
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index a965acd3c0e4..4c15dbf81087 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
| @@ -37,12 +37,8 @@ | |||
| 37 | 37 | ||
| 38 | #include "rio.h" | 38 | #include "rio.h" |
| 39 | 39 | ||
| 40 | LIST_HEAD(rio_devices); | ||
| 41 | |||
| 42 | static void rio_init_em(struct rio_dev *rdev); | 40 | static void rio_init_em(struct rio_dev *rdev); |
| 43 | 41 | ||
| 44 | DEFINE_SPINLOCK(rio_global_list_lock); | ||
| 45 | |||
| 46 | static int next_destid = 0; | 42 | static int next_destid = 0; |
| 47 | static int next_comptag = 1; | 43 | static int next_comptag = 1; |
| 48 | 44 | ||
| @@ -327,127 +323,6 @@ static int rio_is_switch(struct rio_dev *rdev) | |||
| 327 | } | 323 | } |
| 328 | 324 | ||
| 329 | /** | 325 | /** |
| 330 | * rio_switch_init - Sets switch operations for a particular vendor switch | ||
| 331 | * @rdev: RIO device | ||
| 332 | * @do_enum: Enumeration/Discovery mode flag | ||
| 333 | * | ||
| 334 | * Searches the RIO switch ops table for known switch types. If the vid | ||
| 335 | * and did match a switch table entry, then call switch initialization | ||
| 336 | * routine to setup switch-specific routines. | ||
| 337 | */ | ||
| 338 | static void rio_switch_init(struct rio_dev *rdev, int do_enum) | ||
| 339 | { | ||
| 340 | struct rio_switch_ops *cur = __start_rio_switch_ops; | ||
| 341 | struct rio_switch_ops *end = __end_rio_switch_ops; | ||
| 342 | |||
| 343 | while (cur < end) { | ||
| 344 | if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { | ||
| 345 | pr_debug("RIO: calling init routine for %s\n", | ||
| 346 | rio_name(rdev)); | ||
| 347 | cur->init_hook(rdev, do_enum); | ||
| 348 | break; | ||
| 349 | } | ||
| 350 | cur++; | ||
| 351 | } | ||
| 352 | |||
| 353 | if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { | ||
| 354 | pr_debug("RIO: adding STD routing ops for %s\n", | ||
| 355 | rio_name(rdev)); | ||
| 356 | rdev->rswitch->add_entry = rio_std_route_add_entry; | ||
| 357 | rdev->rswitch->get_entry = rio_std_route_get_entry; | ||
| 358 | rdev->rswitch->clr_table = rio_std_route_clr_table; | ||
| 359 | } | ||
| 360 | |||
| 361 | if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) | ||
| 362 | printk(KERN_ERR "RIO: missing routing ops for %s\n", | ||
| 363 | rio_name(rdev)); | ||
| 364 | } | ||
| 365 | |||
| 366 | /** | ||
| 367 | * rio_add_device- Adds a RIO device to the device model | ||
| 368 | * @rdev: RIO device | ||
| 369 | * | ||
| 370 | * Adds the RIO device to the global device list and adds the RIO | ||
| 371 | * device to the RIO device list. Creates the generic sysfs nodes | ||
| 372 | * for an RIO device. | ||
| 373 | */ | ||
| 374 | static int rio_add_device(struct rio_dev *rdev) | ||
| 375 | { | ||
| 376 | int err; | ||
| 377 | |||
| 378 | err = device_add(&rdev->dev); | ||
| 379 | if (err) | ||
| 380 | return err; | ||
| 381 | |||
| 382 | spin_lock(&rio_global_list_lock); | ||
| 383 | list_add_tail(&rdev->global_list, &rio_devices); | ||
| 384 | spin_unlock(&rio_global_list_lock); | ||
| 385 | |||
| 386 | rio_create_sysfs_dev_files(rdev); | ||
| 387 | |||
| 388 | return 0; | ||
| 389 | } | ||
| 390 | |||
| 391 | /** | ||
| 392 | * rio_enable_rx_tx_port - enable input receiver and output transmitter of | ||
| 393 | * given port | ||
| 394 | * @port: Master port associated with the RIO network | ||
| 395 | * @local: local=1 select local port otherwise a far device is reached | ||
| 396 | * @destid: Destination ID of the device to check host bit | ||
| 397 | * @hopcount: Number of hops to reach the target | ||
| 398 | * @port_num: Port (-number on switch) to enable on a far end device | ||
| 399 | * | ||
| 400 | * Returns 0 or 1 from on General Control Command and Status Register | ||
| 401 | * (EXT_PTR+0x3C) | ||
| 402 | */ | ||
| 403 | inline int rio_enable_rx_tx_port(struct rio_mport *port, | ||
| 404 | int local, u16 destid, | ||
| 405 | u8 hopcount, u8 port_num) { | ||
| 406 | #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS | ||
| 407 | u32 regval; | ||
| 408 | u32 ext_ftr_ptr; | ||
| 409 | |||
| 410 | /* | ||
| 411 | * enable rx input tx output port | ||
| 412 | */ | ||
| 413 | pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " | ||
| 414 | "%d, port_num = %d)\n", local, destid, hopcount, port_num); | ||
| 415 | |||
| 416 | ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); | ||
| 417 | |||
| 418 | if (local) { | ||
| 419 | rio_local_read_config_32(port, ext_ftr_ptr + | ||
| 420 | RIO_PORT_N_CTL_CSR(0), | ||
| 421 | ®val); | ||
| 422 | } else { | ||
| 423 | if (rio_mport_read_config_32(port, destid, hopcount, | ||
| 424 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) | ||
| 425 | return -EIO; | ||
| 426 | } | ||
| 427 | |||
| 428 | if (regval & RIO_PORT_N_CTL_P_TYP_SER) { | ||
| 429 | /* serial */ | ||
| 430 | regval = regval | RIO_PORT_N_CTL_EN_RX_SER | ||
| 431 | | RIO_PORT_N_CTL_EN_TX_SER; | ||
| 432 | } else { | ||
| 433 | /* parallel */ | ||
| 434 | regval = regval | RIO_PORT_N_CTL_EN_RX_PAR | ||
| 435 | | RIO_PORT_N_CTL_EN_TX_PAR; | ||
| 436 | } | ||
| 437 | |||
| 438 | if (local) { | ||
| 439 | rio_local_write_config_32(port, ext_ftr_ptr + | ||
| 440 | RIO_PORT_N_CTL_CSR(0), regval); | ||
| 441 | } else { | ||
| 442 | if (rio_mport_write_config_32(port, destid, hopcount, | ||
| 443 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) | ||
| 444 | return -EIO; | ||
| 445 | } | ||
| 446 | #endif | ||
| 447 | return 0; | ||
| 448 | } | ||
| 449 | |||
| 450 | /** | ||
| 451 | * rio_setup_device- Allocates and sets up a RIO device | 326 | * rio_setup_device- Allocates and sets up a RIO device |
| 452 | * @net: RIO network | 327 | * @net: RIO network |
| 453 | * @port: Master port to send transactions | 328 | * @port: Master port to send transactions |
| @@ -587,8 +462,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
| 587 | rdev->destid); | 462 | rdev->destid); |
| 588 | } | 463 | } |
| 589 | 464 | ||
| 590 | rdev->dev.bus = &rio_bus_type; | 465 | rio_attach_device(rdev); |
| 591 | rdev->dev.parent = &rio_bus; | ||
| 592 | 466 | ||
| 593 | device_initialize(&rdev->dev); | 467 | device_initialize(&rdev->dev); |
| 594 | rdev->dev.release = rio_release_dev; | 468 | rdev->dev.release = rio_release_dev; |
| @@ -1260,19 +1134,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable) | |||
| 1260 | /** | 1134 | /** |
| 1261 | * rio_enum_mport- Start enumeration through a master port | 1135 | * rio_enum_mport- Start enumeration through a master port |
| 1262 | * @mport: Master port to send transactions | 1136 | * @mport: Master port to send transactions |
| 1137 | * @flags: Enumeration control flags | ||
| 1263 | * | 1138 | * |
| 1264 | * Starts the enumeration process. If somebody has enumerated our | 1139 | * Starts the enumeration process. If somebody has enumerated our |
| 1265 | * master port device, then give up. If not and we have an active | 1140 | * master port device, then give up. If not and we have an active |
| 1266 | * link, then start recursive peer enumeration. Returns %0 if | 1141 | * link, then start recursive peer enumeration. Returns %0 if |
| 1267 | * enumeration succeeds or %-EBUSY if enumeration fails. | 1142 | * enumeration succeeds or %-EBUSY if enumeration fails. |
| 1268 | */ | 1143 | */ |
| 1269 | int rio_enum_mport(struct rio_mport *mport) | 1144 | int rio_enum_mport(struct rio_mport *mport, u32 flags) |
| 1270 | { | 1145 | { |
| 1271 | struct rio_net *net = NULL; | 1146 | struct rio_net *net = NULL; |
| 1272 | int rc = 0; | 1147 | int rc = 0; |
| 1273 | 1148 | ||
| 1274 | printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, | 1149 | printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, |
| 1275 | mport->name); | 1150 | mport->name); |
| 1151 | |||
| 1152 | /* | ||
| 1153 | * To avoid multiple start requests (repeat enumeration is not supported | ||
| 1154 | * by this method) check if enumeration/discovery was performed for this | ||
| 1155 | * mport: if mport was added into the list of mports for a net exit | ||
| 1156 | * with error. | ||
| 1157 | */ | ||
| 1158 | if (mport->nnode.next || mport->nnode.prev) | ||
| 1159 | return -EBUSY; | ||
| 1160 | |||
| 1276 | /* If somebody else enumerated our master port device, bail. */ | 1161 | /* If somebody else enumerated our master port device, bail. */ |
| 1277 | if (rio_enum_host(mport) < 0) { | 1162 | if (rio_enum_host(mport) < 0) { |
| 1278 | printk(KERN_INFO | 1163 | printk(KERN_INFO |
| @@ -1362,14 +1247,16 @@ static void rio_build_route_tables(struct rio_net *net) | |||
| 1362 | /** | 1247 | /** |
| 1363 | * rio_disc_mport- Start discovery through a master port | 1248 | * rio_disc_mport- Start discovery through a master port |
| 1364 | * @mport: Master port to send transactions | 1249 | * @mport: Master port to send transactions |
| 1250 | * @flags: discovery control flags | ||
| 1365 | * | 1251 | * |
| 1366 | * Starts the discovery process. If we have an active link, | 1252 | * Starts the discovery process. If we have an active link, |
| 1367 | * then wait for the signal that enumeration is complete. | 1253 | * then wait for the signal that enumeration is complete (if wait |
| 1254 | * is allowed). | ||
| 1368 | * When enumeration completion is signaled, start recursive | 1255 | * When enumeration completion is signaled, start recursive |
| 1369 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY | 1256 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY |
| 1370 | * on failure. | 1257 | * on failure. |
| 1371 | */ | 1258 | */ |
| 1372 | int rio_disc_mport(struct rio_mport *mport) | 1259 | int rio_disc_mport(struct rio_mport *mport, u32 flags) |
| 1373 | { | 1260 | { |
| 1374 | struct rio_net *net = NULL; | 1261 | struct rio_net *net = NULL; |
| 1375 | unsigned long to_end; | 1262 | unsigned long to_end; |
| @@ -1379,6 +1266,11 @@ int rio_disc_mport(struct rio_mport *mport) | |||
| 1379 | 1266 | ||
| 1380 | /* If master port has an active link, allocate net and discover peers */ | 1267 | /* If master port has an active link, allocate net and discover peers */ |
| 1381 | if (rio_mport_is_active(mport)) { | 1268 | if (rio_mport_is_active(mport)) { |
| 1269 | if (rio_enum_complete(mport)) | ||
| 1270 | goto enum_done; | ||
| 1271 | else if (flags & RIO_SCAN_ENUM_NO_WAIT) | ||
| 1272 | return -EAGAIN; | ||
| 1273 | |||
| 1382 | pr_debug("RIO: wait for enumeration to complete...\n"); | 1274 | pr_debug("RIO: wait for enumeration to complete...\n"); |
| 1383 | 1275 | ||
| 1384 | to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; | 1276 | to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; |
| @@ -1421,3 +1313,41 @@ enum_done: | |||
| 1421 | bail: | 1313 | bail: |
| 1422 | return -EBUSY; | 1314 | return -EBUSY; |
| 1423 | } | 1315 | } |
| 1316 | |||
| 1317 | static struct rio_scan rio_scan_ops = { | ||
| 1318 | .enumerate = rio_enum_mport, | ||
| 1319 | .discover = rio_disc_mport, | ||
| 1320 | }; | ||
| 1321 | |||
| 1322 | static bool scan; | ||
| 1323 | module_param(scan, bool, 0); | ||
| 1324 | MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery " | ||
| 1325 | "(default = 0)"); | ||
| 1326 | |||
| 1327 | /** | ||
| 1328 | * rio_basic_attach: | ||
| 1329 | * | ||
| 1330 | * When this enumeration/discovery method is loaded as a module this function | ||
| 1331 | * registers its specific enumeration and discover routines for all available | ||
| 1332 | * RapidIO mport devices. The "scan" command line parameter controls ability of | ||
| 1333 | * the module to start RapidIO enumeration/discovery automatically. | ||
| 1334 | * | ||
| 1335 | * Returns 0 for success or -EIO if unable to register itself. | ||
| 1336 | * | ||
| 1337 | * This enumeration/discovery method cannot be unloaded and therefore does not | ||
| 1338 | * provide a matching cleanup_module routine. | ||
| 1339 | */ | ||
| 1340 | |||
| 1341 | static int __init rio_basic_attach(void) | ||
| 1342 | { | ||
| 1343 | if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops)) | ||
| 1344 | return -EIO; | ||
| 1345 | if (scan) | ||
| 1346 | rio_init_mports(); | ||
| 1347 | return 0; | ||
| 1348 | } | ||
| 1349 | |||
| 1350 | late_initcall(rio_basic_attach); | ||
| 1351 | |||
| 1352 | MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery"); | ||
| 1353 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 4dbe360989be..66d4acd5e18f 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c | |||
| @@ -285,3 +285,48 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev) | |||
| 285 | rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); | 285 | rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); |
| 286 | } | 286 | } |
| 287 | } | 287 | } |
| 288 | |||
| 289 | static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, | ||
| 290 | size_t count) | ||
| 291 | { | ||
| 292 | long val; | ||
| 293 | struct rio_mport *port = NULL; | ||
| 294 | int rc; | ||
| 295 | |||
| 296 | if (kstrtol(buf, 0, &val) < 0) | ||
| 297 | return -EINVAL; | ||
| 298 | |||
| 299 | if (val == RIO_MPORT_ANY) { | ||
| 300 | rc = rio_init_mports(); | ||
| 301 | goto exit; | ||
| 302 | } | ||
| 303 | |||
| 304 | if (val < 0 || val >= RIO_MAX_MPORTS) | ||
| 305 | return -EINVAL; | ||
| 306 | |||
| 307 | port = rio_find_mport((int)val); | ||
| 308 | |||
| 309 | if (!port) { | ||
| 310 | pr_debug("RIO: %s: mport_%d not available\n", | ||
| 311 | __func__, (int)val); | ||
| 312 | return -EINVAL; | ||
| 313 | } | ||
| 314 | |||
| 315 | if (!port->nscan) | ||
| 316 | return -EINVAL; | ||
| 317 | |||
| 318 | if (port->host_deviceid >= 0) | ||
| 319 | rc = port->nscan->enumerate(port, 0); | ||
| 320 | else | ||
| 321 | rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); | ||
| 322 | exit: | ||
| 323 | if (!rc) | ||
| 324 | rc = count; | ||
| 325 | |||
| 326 | return rc; | ||
| 327 | } | ||
| 328 | |||
| 329 | struct bus_attribute rio_bus_attrs[] = { | ||
| 330 | __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store), | ||
| 331 | __ATTR_NULL | ||
| 332 | }; | ||
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index d553b5d13722..cb1c08996fbb 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
| @@ -31,7 +31,11 @@ | |||
| 31 | 31 | ||
| 32 | #include "rio.h" | 32 | #include "rio.h" |
| 33 | 33 | ||
| 34 | static LIST_HEAD(rio_devices); | ||
| 35 | static DEFINE_SPINLOCK(rio_global_list_lock); | ||
| 36 | |||
| 34 | static LIST_HEAD(rio_mports); | 37 | static LIST_HEAD(rio_mports); |
| 38 | static DEFINE_MUTEX(rio_mport_list_lock); | ||
| 35 | static unsigned char next_portid; | 39 | static unsigned char next_portid; |
| 36 | static DEFINE_SPINLOCK(rio_mmap_lock); | 40 | static DEFINE_SPINLOCK(rio_mmap_lock); |
| 37 | 41 | ||
| @@ -53,6 +57,32 @@ u16 rio_local_get_device_id(struct rio_mport *port) | |||
| 53 | } | 57 | } |
| 54 | 58 | ||
| 55 | /** | 59 | /** |
| 60 | * rio_add_device- Adds a RIO device to the device model | ||
| 61 | * @rdev: RIO device | ||
| 62 | * | ||
| 63 | * Adds the RIO device to the global device list and adds the RIO | ||
| 64 | * device to the RIO device list. Creates the generic sysfs nodes | ||
| 65 | * for an RIO device. | ||
| 66 | */ | ||
| 67 | int rio_add_device(struct rio_dev *rdev) | ||
| 68 | { | ||
| 69 | int err; | ||
| 70 | |||
| 71 | err = device_add(&rdev->dev); | ||
| 72 | if (err) | ||
| 73 | return err; | ||
| 74 | |||
| 75 | spin_lock(&rio_global_list_lock); | ||
| 76 | list_add_tail(&rdev->global_list, &rio_devices); | ||
| 77 | spin_unlock(&rio_global_list_lock); | ||
| 78 | |||
| 79 | rio_create_sysfs_dev_files(rdev); | ||
| 80 | |||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | EXPORT_SYMBOL_GPL(rio_add_device); | ||
| 84 | |||
| 85 | /** | ||
| 56 | * rio_request_inb_mbox - request inbound mailbox service | 86 | * rio_request_inb_mbox - request inbound mailbox service |
| 57 | * @mport: RIO master port from which to allocate the mailbox resource | 87 | * @mport: RIO master port from which to allocate the mailbox resource |
| 58 | * @dev_id: Device specific pointer to pass on event | 88 | * @dev_id: Device specific pointer to pass on event |
| @@ -489,6 +519,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local, | |||
| 489 | 519 | ||
| 490 | return ext_ftr_ptr; | 520 | return ext_ftr_ptr; |
| 491 | } | 521 | } |
| 522 | EXPORT_SYMBOL_GPL(rio_mport_get_physefb); | ||
| 492 | 523 | ||
| 493 | /** | 524 | /** |
| 494 | * rio_get_comptag - Begin or continue searching for a RIO device by component tag | 525 | * rio_get_comptag - Begin or continue searching for a RIO device by component tag |
| @@ -521,6 +552,7 @@ exit: | |||
| 521 | spin_unlock(&rio_global_list_lock); | 552 | spin_unlock(&rio_global_list_lock); |
| 522 | return rdev; | 553 | return rdev; |
| 523 | } | 554 | } |
| 555 | EXPORT_SYMBOL_GPL(rio_get_comptag); | ||
| 524 | 556 | ||
| 525 | /** | 557 | /** |
| 526 | * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. | 558 | * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. |
| @@ -545,6 +577,107 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) | |||
| 545 | regval); | 577 | regval); |
| 546 | return 0; | 578 | return 0; |
| 547 | } | 579 | } |
| 580 | EXPORT_SYMBOL_GPL(rio_set_port_lockout); | ||
| 581 | |||
| 582 | /** | ||
| 583 | * rio_switch_init - Sets switch operations for a particular vendor switch | ||
| 584 | * @rdev: RIO device | ||
| 585 | * @do_enum: Enumeration/Discovery mode flag | ||
| 586 | * | ||
| 587 | * Searches the RIO switch ops table for known switch types. If the vid | ||
| 588 | * and did match a switch table entry, then call switch initialization | ||
| 589 | * routine to setup switch-specific routines. | ||
| 590 | */ | ||
| 591 | void rio_switch_init(struct rio_dev *rdev, int do_enum) | ||
| 592 | { | ||
| 593 | struct rio_switch_ops *cur = __start_rio_switch_ops; | ||
| 594 | struct rio_switch_ops *end = __end_rio_switch_ops; | ||
| 595 | |||
| 596 | while (cur < end) { | ||
| 597 | if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { | ||
| 598 | pr_debug("RIO: calling init routine for %s\n", | ||
| 599 | rio_name(rdev)); | ||
| 600 | cur->init_hook(rdev, do_enum); | ||
| 601 | break; | ||
| 602 | } | ||
| 603 | cur++; | ||
| 604 | } | ||
| 605 | |||
| 606 | if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { | ||
| 607 | pr_debug("RIO: adding STD routing ops for %s\n", | ||
| 608 | rio_name(rdev)); | ||
| 609 | rdev->rswitch->add_entry = rio_std_route_add_entry; | ||
| 610 | rdev->rswitch->get_entry = rio_std_route_get_entry; | ||
| 611 | rdev->rswitch->clr_table = rio_std_route_clr_table; | ||
| 612 | } | ||
| 613 | |||
| 614 | if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) | ||
| 615 | printk(KERN_ERR "RIO: missing routing ops for %s\n", | ||
| 616 | rio_name(rdev)); | ||
| 617 | } | ||
| 618 | EXPORT_SYMBOL_GPL(rio_switch_init); | ||
| 619 | |||
| 620 | /** | ||
| 621 | * rio_enable_rx_tx_port - enable input receiver and output transmitter of | ||
| 622 | * given port | ||
| 623 | * @port: Master port associated with the RIO network | ||
| 624 | * @local: local=1 select local port otherwise a far device is reached | ||
| 625 | * @destid: Destination ID of the device to check host bit | ||
| 626 | * @hopcount: Number of hops to reach the target | ||
| 627 | * @port_num: Port (-number on switch) to enable on a far end device | ||
| 628 | * | ||
| 629 | * Returns 0 or 1 from on General Control Command and Status Register | ||
| 630 | * (EXT_PTR+0x3C) | ||
| 631 | */ | ||
| 632 | int rio_enable_rx_tx_port(struct rio_mport *port, | ||
| 633 | int local, u16 destid, | ||
| 634 | u8 hopcount, u8 port_num) | ||
| 635 | { | ||
| 636 | #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS | ||
| 637 | u32 regval; | ||
| 638 | u32 ext_ftr_ptr; | ||
| 639 | |||
| 640 | /* | ||
| 641 | * enable rx input tx output port | ||
| 642 | */ | ||
| 643 | pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " | ||
| 644 | "%d, port_num = %d)\n", local, destid, hopcount, port_num); | ||
| 645 | |||
| 646 | ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); | ||
| 647 | |||
| 648 | if (local) { | ||
| 649 | rio_local_read_config_32(port, ext_ftr_ptr + | ||
| 650 | RIO_PORT_N_CTL_CSR(0), | ||
| 651 | ®val); | ||
| 652 | } else { | ||
| 653 | if (rio_mport_read_config_32(port, destid, hopcount, | ||
| 654 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) | ||
| 655 | return -EIO; | ||
| 656 | } | ||
| 657 | |||
| 658 | if (regval & RIO_PORT_N_CTL_P_TYP_SER) { | ||
| 659 | /* serial */ | ||
| 660 | regval = regval | RIO_PORT_N_CTL_EN_RX_SER | ||
| 661 | | RIO_PORT_N_CTL_EN_TX_SER; | ||
| 662 | } else { | ||
| 663 | /* parallel */ | ||
| 664 | regval = regval | RIO_PORT_N_CTL_EN_RX_PAR | ||
| 665 | | RIO_PORT_N_CTL_EN_TX_PAR; | ||
| 666 | } | ||
| 667 | |||
| 668 | if (local) { | ||
| 669 | rio_local_write_config_32(port, ext_ftr_ptr + | ||
| 670 | RIO_PORT_N_CTL_CSR(0), regval); | ||
| 671 | } else { | ||
| 672 | if (rio_mport_write_config_32(port, destid, hopcount, | ||
| 673 | ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) | ||
| 674 | return -EIO; | ||
| 675 | } | ||
| 676 | #endif | ||
| 677 | return 0; | ||
| 678 | } | ||
| 679 | EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); | ||
| 680 | |||
| 548 | 681 | ||
| 549 | /** | 682 | /** |
| 550 | * rio_chk_dev_route - Validate route to the specified device. | 683 | * rio_chk_dev_route - Validate route to the specified device. |
| @@ -610,6 +743,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount) | |||
| 610 | 743 | ||
| 611 | return 0; | 744 | return 0; |
| 612 | } | 745 | } |
| 746 | EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access); | ||
| 613 | 747 | ||
| 614 | /** | 748 | /** |
| 615 | * rio_chk_dev_access - Validate access to the specified device. | 749 | * rio_chk_dev_access - Validate access to the specified device. |
| @@ -941,6 +1075,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, | |||
| 941 | return RIO_GET_BLOCK_ID(reg_val); | 1075 | return RIO_GET_BLOCK_ID(reg_val); |
| 942 | } | 1076 | } |
| 943 | } | 1077 | } |
| 1078 | EXPORT_SYMBOL_GPL(rio_mport_get_efb); | ||
| 944 | 1079 | ||
| 945 | /** | 1080 | /** |
| 946 | * rio_mport_get_feature - query for devices' extended features | 1081 | * rio_mport_get_feature - query for devices' extended features |
| @@ -997,6 +1132,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid, | |||
| 997 | 1132 | ||
| 998 | return 0; | 1133 | return 0; |
| 999 | } | 1134 | } |
| 1135 | EXPORT_SYMBOL_GPL(rio_mport_get_feature); | ||
| 1000 | 1136 | ||
| 1001 | /** | 1137 | /** |
| 1002 | * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did | 1138 | * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did |
| @@ -1246,6 +1382,95 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); | |||
| 1246 | 1382 | ||
| 1247 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | 1383 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ |
| 1248 | 1384 | ||
| 1385 | /** | ||
| 1386 | * rio_find_mport - find RIO mport by its ID | ||
| 1387 | * @mport_id: number (ID) of mport device | ||
| 1388 | * | ||
| 1389 | * Given a RIO mport number, the desired mport is located | ||
| 1390 | * in the global list of mports. If the mport is found, a pointer to its | ||
| 1391 | * data structure is returned. If no mport is found, %NULL is returned. | ||
| 1392 | */ | ||
| 1393 | struct rio_mport *rio_find_mport(int mport_id) | ||
| 1394 | { | ||
| 1395 | struct rio_mport *port; | ||
| 1396 | |||
| 1397 | mutex_lock(&rio_mport_list_lock); | ||
| 1398 | list_for_each_entry(port, &rio_mports, node) { | ||
| 1399 | if (port->id == mport_id) | ||
| 1400 | goto found; | ||
| 1401 | } | ||
| 1402 | port = NULL; | ||
| 1403 | found: | ||
| 1404 | mutex_unlock(&rio_mport_list_lock); | ||
| 1405 | |||
| 1406 | return port; | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | /** | ||
| 1410 | * rio_register_scan - enumeration/discovery method registration interface | ||
| 1411 | * @mport_id: mport device ID for which fabric scan routine has to be set | ||
| 1412 | * (RIO_MPORT_ANY = set for all available mports) | ||
| 1413 | * @scan_ops: enumeration/discovery control structure | ||
| 1414 | * | ||
| 1415 | * Assigns enumeration or discovery method to the specified mport device (or all | ||
| 1416 | * available mports if RIO_MPORT_ANY is specified). | ||
| 1417 | * Returns error if the mport already has an enumerator attached to it. | ||
| 1418 | * In case of RIO_MPORT_ANY ignores ports with valid scan routines and returns | ||
| 1419 | * an error if was unable to find at least one available mport. | ||
| 1420 | */ | ||
| 1421 | int rio_register_scan(int mport_id, struct rio_scan *scan_ops) | ||
| 1422 | { | ||
| 1423 | struct rio_mport *port; | ||
| 1424 | int rc = -EBUSY; | ||
| 1425 | |||
| 1426 | mutex_lock(&rio_mport_list_lock); | ||
| 1427 | list_for_each_entry(port, &rio_mports, node) { | ||
| 1428 | if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { | ||
| 1429 | if (port->nscan && mport_id == RIO_MPORT_ANY) | ||
| 1430 | continue; | ||
| 1431 | else if (port->nscan) | ||
| 1432 | break; | ||
| 1433 | |||
| 1434 | port->nscan = scan_ops; | ||
| 1435 | rc = 0; | ||
| 1436 | |||
| 1437 | if (mport_id != RIO_MPORT_ANY) | ||
| 1438 | break; | ||
| 1439 | } | ||
| 1440 | } | ||
| 1441 | mutex_unlock(&rio_mport_list_lock); | ||
| 1442 | |||
| 1443 | return rc; | ||
| 1444 | } | ||
| 1445 | EXPORT_SYMBOL_GPL(rio_register_scan); | ||
| 1446 | |||
| 1447 | /** | ||
| 1448 | * rio_unregister_scan - removes enumeration/discovery method from mport | ||
| 1449 | * @mport_id: mport device ID for which fabric scan routine has to be | ||
| 1450 | * unregistered (RIO_MPORT_ANY = set for all available mports) | ||
| 1451 | * | ||
| 1452 | * Removes enumeration or discovery method assigned to the specified mport | ||
| 1453 | * device (or all available mports if RIO_MPORT_ANY is specified). | ||
| 1454 | */ | ||
| 1455 | int rio_unregister_scan(int mport_id) | ||
| 1456 | { | ||
| 1457 | struct rio_mport *port; | ||
| 1458 | |||
| 1459 | mutex_lock(&rio_mport_list_lock); | ||
| 1460 | list_for_each_entry(port, &rio_mports, node) { | ||
| 1461 | if (port->id == mport_id || mport_id == RIO_MPORT_ANY) { | ||
| 1462 | if (port->nscan) | ||
| 1463 | port->nscan = NULL; | ||
| 1464 | if (mport_id != RIO_MPORT_ANY) | ||
| 1465 | break; | ||
| 1466 | } | ||
| 1467 | } | ||
| 1468 | mutex_unlock(&rio_mport_list_lock); | ||
| 1469 | |||
| 1470 | return 0; | ||
| 1471 | } | ||
| 1472 | EXPORT_SYMBOL_GPL(rio_unregister_scan); | ||
| 1473 | |||
| 1249 | static void rio_fixup_device(struct rio_dev *dev) | 1474 | static void rio_fixup_device(struct rio_dev *dev) |
| 1250 | { | 1475 | { |
| 1251 | } | 1476 | } |
| @@ -1274,7 +1499,7 @@ static void disc_work_handler(struct work_struct *_work) | |||
| 1274 | work = container_of(_work, struct rio_disc_work, work); | 1499 | work = container_of(_work, struct rio_disc_work, work); |
| 1275 | pr_debug("RIO: discovery work for mport %d %s\n", | 1500 | pr_debug("RIO: discovery work for mport %d %s\n", |
| 1276 | work->mport->id, work->mport->name); | 1501 | work->mport->id, work->mport->name); |
| 1277 | rio_disc_mport(work->mport); | 1502 | work->mport->nscan->discover(work->mport, 0); |
| 1278 | } | 1503 | } |
| 1279 | 1504 | ||
| 1280 | int rio_init_mports(void) | 1505 | int rio_init_mports(void) |
| @@ -1290,12 +1515,15 @@ int rio_init_mports(void) | |||
| 1290 | * First, run enumerations and check if we need to perform discovery | 1515 | * First, run enumerations and check if we need to perform discovery |
| 1291 | * on any of the registered mports. | 1516 | * on any of the registered mports. |
| 1292 | */ | 1517 | */ |
| 1518 | mutex_lock(&rio_mport_list_lock); | ||
| 1293 | list_for_each_entry(port, &rio_mports, node) { | 1519 | list_for_each_entry(port, &rio_mports, node) { |
| 1294 | if (port->host_deviceid >= 0) | 1520 | if (port->host_deviceid >= 0) { |
| 1295 | rio_enum_mport(port); | 1521 | if (port->nscan) |
| 1296 | else | 1522 | port->nscan->enumerate(port, 0); |
| 1523 | } else | ||
| 1297 | n++; | 1524 | n++; |
| 1298 | } | 1525 | } |
| 1526 | mutex_unlock(&rio_mport_list_lock); | ||
| 1299 | 1527 | ||
| 1300 | if (!n) | 1528 | if (!n) |
| 1301 | goto no_disc; | 1529 | goto no_disc; |
| @@ -1322,14 +1550,16 @@ int rio_init_mports(void) | |||
| 1322 | } | 1550 | } |
| 1323 | 1551 | ||
| 1324 | n = 0; | 1552 | n = 0; |
| 1553 | mutex_lock(&rio_mport_list_lock); | ||
| 1325 | list_for_each_entry(port, &rio_mports, node) { | 1554 | list_for_each_entry(port, &rio_mports, node) { |
| 1326 | if (port->host_deviceid < 0) { | 1555 | if (port->host_deviceid < 0 && port->nscan) { |
| 1327 | work[n].mport = port; | 1556 | work[n].mport = port; |
| 1328 | INIT_WORK(&work[n].work, disc_work_handler); | 1557 | INIT_WORK(&work[n].work, disc_work_handler); |
| 1329 | queue_work(rio_wq, &work[n].work); | 1558 | queue_work(rio_wq, &work[n].work); |
| 1330 | n++; | 1559 | n++; |
| 1331 | } | 1560 | } |
| 1332 | } | 1561 | } |
| 1562 | mutex_unlock(&rio_mport_list_lock); | ||
| 1333 | 1563 | ||
| 1334 | flush_workqueue(rio_wq); | 1564 | flush_workqueue(rio_wq); |
| 1335 | pr_debug("RIO: destroy discovery workqueue\n"); | 1565 | pr_debug("RIO: destroy discovery workqueue\n"); |
| @@ -1342,8 +1572,6 @@ no_disc: | |||
| 1342 | return 0; | 1572 | return 0; |
| 1343 | } | 1573 | } |
| 1344 | 1574 | ||
| 1345 | device_initcall_sync(rio_init_mports); | ||
| 1346 | |||
| 1347 | static int hdids[RIO_MAX_MPORTS + 1]; | 1575 | static int hdids[RIO_MAX_MPORTS + 1]; |
| 1348 | 1576 | ||
| 1349 | static int rio_get_hdid(int index) | 1577 | static int rio_get_hdid(int index) |
| @@ -1371,7 +1599,10 @@ int rio_register_mport(struct rio_mport *port) | |||
| 1371 | 1599 | ||
| 1372 | port->id = next_portid++; | 1600 | port->id = next_portid++; |
| 1373 | port->host_deviceid = rio_get_hdid(port->id); | 1601 | port->host_deviceid = rio_get_hdid(port->id); |
| 1602 | port->nscan = NULL; | ||
| 1603 | mutex_lock(&rio_mport_list_lock); | ||
| 1374 | list_add_tail(&port->node, &rio_mports); | 1604 | list_add_tail(&port->node, &rio_mports); |
| 1605 | mutex_unlock(&rio_mport_list_lock); | ||
| 1375 | return 0; | 1606 | return 0; |
| 1376 | } | 1607 | } |
| 1377 | 1608 | ||
| @@ -1386,3 +1617,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox); | |||
| 1386 | EXPORT_SYMBOL_GPL(rio_release_inb_mbox); | 1617 | EXPORT_SYMBOL_GPL(rio_release_inb_mbox); |
| 1387 | EXPORT_SYMBOL_GPL(rio_request_outb_mbox); | 1618 | EXPORT_SYMBOL_GPL(rio_request_outb_mbox); |
| 1388 | EXPORT_SYMBOL_GPL(rio_release_outb_mbox); | 1619 | EXPORT_SYMBOL_GPL(rio_release_outb_mbox); |
| 1620 | EXPORT_SYMBOL_GPL(rio_init_mports); | ||
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index b1af414f15e6..c14f864dea5c 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/rio.h> | 15 | #include <linux/rio.h> |
| 16 | 16 | ||
| 17 | #define RIO_MAX_CHK_RETRY 3 | 17 | #define RIO_MAX_CHK_RETRY 3 |
| 18 | #define RIO_MPORT_ANY (-1) | ||
| 18 | 19 | ||
| 19 | /* Functions internal to the RIO core code */ | 20 | /* Functions internal to the RIO core code */ |
| 20 | 21 | ||
| @@ -27,8 +28,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, | |||
| 27 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, | 28 | extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, |
| 28 | u8 hopcount); | 29 | u8 hopcount); |
| 29 | extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); | 30 | extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); |
| 30 | extern int rio_enum_mport(struct rio_mport *mport); | ||
| 31 | extern int rio_disc_mport(struct rio_mport *mport); | ||
| 32 | extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, | 31 | extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, |
| 33 | u8 hopcount, u16 table, u16 route_destid, | 32 | u8 hopcount, u16 table, u16 route_destid, |
| 34 | u8 route_port); | 33 | u8 route_port); |
| @@ -39,10 +38,18 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, | |||
| 39 | u8 hopcount, u16 table); | 38 | u8 hopcount, u16 table); |
| 40 | extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); | 39 | extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); |
| 41 | extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); | 40 | extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); |
| 41 | extern int rio_add_device(struct rio_dev *rdev); | ||
| 42 | extern void rio_switch_init(struct rio_dev *rdev, int do_enum); | ||
| 43 | extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, | ||
| 44 | u8 hopcount, u8 port_num); | ||
| 45 | extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); | ||
| 46 | extern int rio_unregister_scan(int mport_id); | ||
| 47 | extern void rio_attach_device(struct rio_dev *rdev); | ||
| 48 | extern struct rio_mport *rio_find_mport(int mport_id); | ||
| 42 | 49 | ||
| 43 | /* Structures internal to the RIO core code */ | 50 | /* Structures internal to the RIO core code */ |
| 44 | extern struct device_attribute rio_dev_attrs[]; | 51 | extern struct device_attribute rio_dev_attrs[]; |
| 45 | extern spinlock_t rio_global_list_lock; | 52 | extern struct bus_attribute rio_bus_attrs[]; |
| 46 | 53 | ||
| 47 | extern struct rio_switch_ops __start_rio_switch_ops[]; | 54 | extern struct rio_switch_ops __start_rio_switch_ops[]; |
| 48 | extern struct rio_switch_ops __end_rio_switch_ops[]; | 55 | extern struct rio_switch_ops __end_rio_switch_ops[]; |
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 48b6612fae7f..d5af7baa48b5 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c | |||
| @@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev) | |||
| 285 | info->irq, ret); | 285 | info->irq, ret); |
| 286 | 286 | ||
| 287 | dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); | 287 | dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); |
| 288 | if (pdata->rtc_delay) { | 288 | if (pdata && pdata->rtc_delay) { |
| 289 | info->lp3974_bug_workaround = true; | 289 | info->lp3974_bug_workaround = true; |
| 290 | dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." | 290 | dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." |
| 291 | " RTC updates will be extremely slow.\n"); | 291 | " RTC updates will be extremely slow.\n"); |
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 8900ea784817..0f0609b1aa2c 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c | |||
| @@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev) | |||
| 306 | struct pl031_local *ldata = dev_get_drvdata(&adev->dev); | 306 | struct pl031_local *ldata = dev_get_drvdata(&adev->dev); |
| 307 | 307 | ||
| 308 | amba_set_drvdata(adev, NULL); | 308 | amba_set_drvdata(adev, NULL); |
| 309 | free_irq(adev->irq[0], ldata->rtc); | 309 | free_irq(adev->irq[0], ldata); |
| 310 | rtc_device_unregister(ldata->rtc); | 310 | rtc_device_unregister(ldata->rtc); |
| 311 | iounmap(ldata->base); | 311 | iounmap(ldata->base); |
| 312 | kfree(ldata); | 312 | kfree(ldata); |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d71d60f94fc1..2e937bdace6f 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
| @@ -2199,7 +2199,7 @@ config FB_XILINX | |||
| 2199 | 2199 | ||
| 2200 | config FB_GOLDFISH | 2200 | config FB_GOLDFISH |
| 2201 | tristate "Goldfish Framebuffer" | 2201 | tristate "Goldfish Framebuffer" |
| 2202 | depends on FB | 2202 | depends on FB && HAS_DMA |
| 2203 | select FB_CFB_FILLRECT | 2203 | select FB_CFB_FILLRECT |
| 2204 | select FB_CFB_COPYAREA | 2204 | select FB_CFB_COPYAREA |
| 2205 | select FB_CFB_IMAGEBLIT | 2205 | select FB_CFB_IMAGEBLIT |
| @@ -2453,6 +2453,23 @@ config FB_HYPERV | |||
| 2453 | help | 2453 | help |
| 2454 | This framebuffer driver supports Microsoft Hyper-V Synthetic Video. | 2454 | This framebuffer driver supports Microsoft Hyper-V Synthetic Video. |
| 2455 | 2455 | ||
| 2456 | config FB_SIMPLE | ||
| 2457 | bool "Simple framebuffer support" | ||
| 2458 | depends on (FB = y) && OF | ||
| 2459 | select FB_CFB_FILLRECT | ||
| 2460 | select FB_CFB_COPYAREA | ||
| 2461 | select FB_CFB_IMAGEBLIT | ||
| 2462 | help | ||
| 2463 | Say Y if you want support for a simple frame-buffer. | ||
| 2464 | |||
| 2465 | This driver assumes that the display hardware has been initialized | ||
| 2466 | before the kernel boots, and the kernel will simply render to the | ||
| 2467 | pre-allocated frame buffer surface. | ||
| 2468 | |||
| 2469 | Configuration re: surface address, size, and format must be provided | ||
| 2470 | through device tree, or potentially plain old platform data in the | ||
| 2471 | future. | ||
| 2472 | |||
| 2456 | source "drivers/video/omap/Kconfig" | 2473 | source "drivers/video/omap/Kconfig" |
| 2457 | source "drivers/video/omap2/Kconfig" | 2474 | source "drivers/video/omap2/Kconfig" |
| 2458 | source "drivers/video/exynos/Kconfig" | 2475 | source "drivers/video/exynos/Kconfig" |
diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 7234e4a959e8..e8bae8dd4804 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile | |||
| @@ -166,6 +166,7 @@ obj-$(CONFIG_FB_MX3) += mx3fb.o | |||
| 166 | obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o | 166 | obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o |
| 167 | obj-$(CONFIG_FB_MXS) += mxsfb.o | 167 | obj-$(CONFIG_FB_MXS) += mxsfb.o |
| 168 | obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o | 168 | obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o |
| 169 | obj-$(CONFIG_FB_SIMPLE) += simplefb.o | ||
| 169 | 170 | ||
| 170 | # the test framebuffer is last | 171 | # the test framebuffer is last |
| 171 | obj-$(CONFIG_FB_VIRTUAL) += vfb.o | 172 | obj-$(CONFIG_FB_VIRTUAL) += vfb.o |
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c new file mode 100644 index 000000000000..e2e9e3e61b72 --- /dev/null +++ b/drivers/video/simplefb.c | |||
| @@ -0,0 +1,234 @@ | |||
| 1 | /* | ||
| 2 | * Simplest possible simple frame-buffer driver, as a platform device | ||
| 3 | * | ||
| 4 | * Copyright (c) 2013, Stephen Warren | ||
| 5 | * | ||
| 6 | * Based on q40fb.c, which was: | ||
| 7 | * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org> | ||
| 8 | * | ||
| 9 | * Also based on offb.c, which was: | ||
| 10 | * Copyright (C) 1997 Geert Uytterhoeven | ||
| 11 | * Copyright (C) 1996 Paul Mackerras | ||
| 12 | * | ||
| 13 | * This program is free software; you can redistribute it and/or modify it | ||
| 14 | * under the terms and conditions of the GNU General Public License, | ||
| 15 | * version 2, as published by the Free Software Foundation. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 20 | * more details. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <linux/errno.h> | ||
| 24 | #include <linux/fb.h> | ||
| 25 | #include <linux/io.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 28 | |||
| 29 | static struct fb_fix_screeninfo simplefb_fix = { | ||
| 30 | .id = "simple", | ||
| 31 | .type = FB_TYPE_PACKED_PIXELS, | ||
| 32 | .visual = FB_VISUAL_TRUECOLOR, | ||
| 33 | .accel = FB_ACCEL_NONE, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static struct fb_var_screeninfo simplefb_var = { | ||
| 37 | .height = -1, | ||
| 38 | .width = -1, | ||
| 39 | .activate = FB_ACTIVATE_NOW, | ||
| 40 | .vmode = FB_VMODE_NONINTERLACED, | ||
| 41 | }; | ||
| 42 | |||
| 43 | static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | ||
| 44 | u_int transp, struct fb_info *info) | ||
| 45 | { | ||
| 46 | u32 *pal = info->pseudo_palette; | ||
| 47 | u32 cr = red >> (16 - info->var.red.length); | ||
| 48 | u32 cg = green >> (16 - info->var.green.length); | ||
| 49 | u32 cb = blue >> (16 - info->var.blue.length); | ||
| 50 | u32 value; | ||
| 51 | |||
| 52 | if (regno >= 16) | ||
| 53 | return -EINVAL; | ||
| 54 | |||
| 55 | value = (cr << info->var.red.offset) | | ||
| 56 | (cg << info->var.green.offset) | | ||
| 57 | (cb << info->var.blue.offset); | ||
| 58 | if (info->var.transp.length > 0) { | ||
| 59 | u32 mask = (1 << info->var.transp.length) - 1; | ||
| 60 | mask <<= info->var.transp.offset; | ||
| 61 | value |= mask; | ||
| 62 | } | ||
| 63 | pal[regno] = value; | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | static struct fb_ops simplefb_ops = { | ||
| 69 | .owner = THIS_MODULE, | ||
| 70 | .fb_setcolreg = simplefb_setcolreg, | ||
| 71 | .fb_fillrect = cfb_fillrect, | ||
| 72 | .fb_copyarea = cfb_copyarea, | ||
| 73 | .fb_imageblit = cfb_imageblit, | ||
| 74 | }; | ||
| 75 | |||
| 76 | struct simplefb_format { | ||
| 77 | const char *name; | ||
| 78 | u32 bits_per_pixel; | ||
| 79 | struct fb_bitfield red; | ||
| 80 | struct fb_bitfield green; | ||
| 81 | struct fb_bitfield blue; | ||
| 82 | struct fb_bitfield transp; | ||
| 83 | }; | ||
| 84 | |||
| 85 | static struct simplefb_format simplefb_formats[] = { | ||
| 86 | { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} }, | ||
| 87 | }; | ||
| 88 | |||
| 89 | struct simplefb_params { | ||
| 90 | u32 width; | ||
| 91 | u32 height; | ||
| 92 | u32 stride; | ||
| 93 | struct simplefb_format *format; | ||
| 94 | }; | ||
| 95 | |||
| 96 | static int simplefb_parse_dt(struct platform_device *pdev, | ||
| 97 | struct simplefb_params *params) | ||
| 98 | { | ||
| 99 | struct device_node *np = pdev->dev.of_node; | ||
| 100 | int ret; | ||
| 101 | const char *format; | ||
| 102 | int i; | ||
| 103 | |||
| 104 | ret = of_property_read_u32(np, "width", ¶ms->width); | ||
| 105 | if (ret) { | ||
| 106 | dev_err(&pdev->dev, "Can't parse width property\n"); | ||
| 107 | return ret; | ||
| 108 | } | ||
| 109 | |||
| 110 | ret = of_property_read_u32(np, "height", ¶ms->height); | ||
| 111 | if (ret) { | ||
| 112 | dev_err(&pdev->dev, "Can't parse height property\n"); | ||
| 113 | return ret; | ||
| 114 | } | ||
| 115 | |||
| 116 | ret = of_property_read_u32(np, "stride", ¶ms->stride); | ||
| 117 | if (ret) { | ||
| 118 | dev_err(&pdev->dev, "Can't parse stride property\n"); | ||
| 119 | return ret; | ||
| 120 | } | ||
| 121 | |||
| 122 | ret = of_property_read_string(np, "format", &format); | ||
| 123 | if (ret) { | ||
| 124 | dev_err(&pdev->dev, "Can't parse format property\n"); | ||
| 125 | return ret; | ||
| 126 | } | ||
| 127 | params->format = NULL; | ||
| 128 | for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { | ||
| 129 | if (strcmp(format, simplefb_formats[i].name)) | ||
| 130 | continue; | ||
| 131 | params->format = &simplefb_formats[i]; | ||
| 132 | break; | ||
| 133 | } | ||
| 134 | if (!params->format) { | ||
| 135 | dev_err(&pdev->dev, "Invalid format value\n"); | ||
| 136 | return -EINVAL; | ||
| 137 | } | ||
| 138 | |||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | static int simplefb_probe(struct platform_device *pdev) | ||
| 143 | { | ||
| 144 | int ret; | ||
| 145 | struct simplefb_params params; | ||
| 146 | struct fb_info *info; | ||
| 147 | struct resource *mem; | ||
| 148 | |||
| 149 | if (fb_get_options("simplefb", NULL)) | ||
| 150 | return -ENODEV; | ||
| 151 | |||
| 152 | ret = simplefb_parse_dt(pdev, ¶ms); | ||
| 153 | if (ret) | ||
| 154 | return ret; | ||
| 155 | |||
| 156 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 157 | if (!mem) { | ||
| 158 | dev_err(&pdev->dev, "No memory resource\n"); | ||
| 159 | return -EINVAL; | ||
| 160 | } | ||
| 161 | |||
| 162 | info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev); | ||
| 163 | if (!info) | ||
| 164 | return -ENOMEM; | ||
| 165 | platform_set_drvdata(pdev, info); | ||
| 166 | |||
| 167 | info->fix = simplefb_fix; | ||
| 168 | info->fix.smem_start = mem->start; | ||
| 169 | info->fix.smem_len = resource_size(mem); | ||
| 170 | info->fix.line_length = params.stride; | ||
| 171 | |||
| 172 | info->var = simplefb_var; | ||
| 173 | info->var.xres = params.width; | ||
| 174 | info->var.yres = params.height; | ||
| 175 | info->var.xres_virtual = params.width; | ||
| 176 | info->var.yres_virtual = params.height; | ||
| 177 | info->var.bits_per_pixel = params.format->bits_per_pixel; | ||
| 178 | info->var.red = params.format->red; | ||
| 179 | info->var.green = params.format->green; | ||
| 180 | info->var.blue = params.format->blue; | ||
| 181 | info->var.transp = params.format->transp; | ||
| 182 | |||
| 183 | info->fbops = &simplefb_ops; | ||
| 184 | info->flags = FBINFO_DEFAULT; | ||
| 185 | info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start, | ||
| 186 | info->fix.smem_len); | ||
| 187 | if (!info->screen_base) { | ||
| 188 | framebuffer_release(info); | ||
| 189 | return -ENODEV; | ||
| 190 | } | ||
| 191 | info->pseudo_palette = (void *)(info + 1); | ||
| 192 | |||
| 193 | ret = register_framebuffer(info); | ||
| 194 | if (ret < 0) { | ||
| 195 | dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); | ||
| 196 | framebuffer_release(info); | ||
| 197 | return ret; | ||
| 198 | } | ||
| 199 | |||
| 200 | dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); | ||
| 201 | |||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | |||
| 205 | static int simplefb_remove(struct platform_device *pdev) | ||
| 206 | { | ||
| 207 | struct fb_info *info = platform_get_drvdata(pdev); | ||
| 208 | |||
| 209 | unregister_framebuffer(info); | ||
| 210 | framebuffer_release(info); | ||
| 211 | |||
| 212 | return 0; | ||
| 213 | } | ||
| 214 | |||
| 215 | static const struct of_device_id simplefb_of_match[] = { | ||
| 216 | { .compatible = "simple-framebuffer", }, | ||
| 217 | { }, | ||
| 218 | }; | ||
| 219 | MODULE_DEVICE_TABLE(of, simplefb_of_match); | ||
| 220 | |||
| 221 | static struct platform_driver simplefb_driver = { | ||
| 222 | .driver = { | ||
| 223 | .name = "simple-framebuffer", | ||
| 224 | .owner = THIS_MODULE, | ||
| 225 | .of_match_table = simplefb_of_match, | ||
| 226 | }, | ||
| 227 | .probe = simplefb_probe, | ||
| 228 | .remove = simplefb_remove, | ||
| 229 | }; | ||
| 230 | module_platform_driver(simplefb_driver); | ||
| 231 | |||
| 232 | MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>"); | ||
| 233 | MODULE_DESCRIPTION("Simple framebuffer driver"); | ||
| 234 | MODULE_LICENSE("GPL v2"); | ||
| @@ -307,7 +307,9 @@ static void free_ioctx(struct kioctx *ctx) | |||
| 307 | kunmap_atomic(ring); | 307 | kunmap_atomic(ring); |
| 308 | 308 | ||
| 309 | while (atomic_read(&ctx->reqs_active) > 0) { | 309 | while (atomic_read(&ctx->reqs_active) > 0) { |
| 310 | wait_event(ctx->wait, head != ctx->tail); | 310 | wait_event(ctx->wait, |
| 311 | head != ctx->tail || | ||
| 312 | atomic_read(&ctx->reqs_active) <= 0); | ||
| 311 | 313 | ||
| 312 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; | 314 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; |
| 313 | 315 | ||
| @@ -1299,8 +1301,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
| 1299 | * < min_nr if the timeout specified by timeout has elapsed | 1301 | * < min_nr if the timeout specified by timeout has elapsed |
| 1300 | * before sufficient events are available, where timeout == NULL | 1302 | * before sufficient events are available, where timeout == NULL |
| 1301 | * specifies an infinite timeout. Note that the timeout pointed to by | 1303 | * specifies an infinite timeout. Note that the timeout pointed to by |
| 1302 | * timeout is relative and will be updated if not NULL and the | 1304 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
| 1303 | * operation blocks. Will fail with -ENOSYS if not implemented. | ||
| 1304 | */ | 1305 | */ |
| 1305 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, | 1306 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
| 1306 | long, min_nr, | 1307 | long, min_nr, |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index dfce656ddb33..5d4513cb1b3c 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
| @@ -1229,6 +1229,19 @@ static int fat_read_root(struct inode *inode) | |||
| 1229 | return 0; | 1229 | return 0; |
| 1230 | } | 1230 | } |
| 1231 | 1231 | ||
| 1232 | static unsigned long calc_fat_clusters(struct super_block *sb) | ||
| 1233 | { | ||
| 1234 | struct msdos_sb_info *sbi = MSDOS_SB(sb); | ||
| 1235 | |||
| 1236 | /* Divide first to avoid overflow */ | ||
| 1237 | if (sbi->fat_bits != 12) { | ||
| 1238 | unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; | ||
| 1239 | return ent_per_sec * sbi->fat_length; | ||
| 1240 | } | ||
| 1241 | |||
| 1242 | return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; | ||
| 1243 | } | ||
| 1244 | |||
| 1232 | /* | 1245 | /* |
| 1233 | * Read the super block of an MS-DOS FS. | 1246 | * Read the super block of an MS-DOS FS. |
| 1234 | */ | 1247 | */ |
| @@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, | |||
| 1434 | sbi->dirty = b->fat16.state & FAT_STATE_DIRTY; | 1447 | sbi->dirty = b->fat16.state & FAT_STATE_DIRTY; |
| 1435 | 1448 | ||
| 1436 | /* check that FAT table does not overflow */ | 1449 | /* check that FAT table does not overflow */ |
| 1437 | fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; | 1450 | fat_clusters = calc_fat_clusters(sb); |
| 1438 | total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); | 1451 | total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); |
| 1439 | if (total_clusters > MAX_FAT(sb)) { | 1452 | if (total_clusters > MAX_FAT(sb)) { |
| 1440 | if (!silent) | 1453 | if (!silent) |
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index f3b1a15ccd59..d3fa6bd9503e 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c | |||
| @@ -415,7 +415,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) | |||
| 415 | spin_lock(&tree->hash_lock); | 415 | spin_lock(&tree->hash_lock); |
| 416 | node = hfs_bnode_findhash(tree, num); | 416 | node = hfs_bnode_findhash(tree, num); |
| 417 | spin_unlock(&tree->hash_lock); | 417 | spin_unlock(&tree->hash_lock); |
| 418 | BUG_ON(node); | 418 | if (node) { |
| 419 | pr_crit("new node %u already hashed?\n", num); | ||
| 420 | WARN_ON(1); | ||
| 421 | return node; | ||
| 422 | } | ||
| 419 | node = __hfs_bnode_create(tree, num); | 423 | node = __hfs_bnode_create(tree, num); |
| 420 | if (!node) | 424 | if (!node) |
| 421 | return ERR_PTR(-ENOMEM); | 425 | return ERR_PTR(-ENOMEM); |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 689fb608648e..bccfec8343c5 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
| @@ -219,13 +219,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) | |||
| 219 | 219 | ||
| 220 | static int nilfs_set_page_dirty(struct page *page) | 220 | static int nilfs_set_page_dirty(struct page *page) |
| 221 | { | 221 | { |
| 222 | int ret = __set_page_dirty_buffers(page); | 222 | int ret = __set_page_dirty_nobuffers(page); |
| 223 | 223 | ||
| 224 | if (ret) { | 224 | if (page_has_buffers(page)) { |
| 225 | struct inode *inode = page->mapping->host; | 225 | struct inode *inode = page->mapping->host; |
| 226 | unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); | 226 | unsigned nr_dirty = 0; |
| 227 | struct buffer_head *bh, *head; | ||
| 227 | 228 | ||
| 228 | nilfs_set_file_dirty(inode, nr_dirty); | 229 | /* |
| 230 | * This page is locked by callers, and no other thread | ||
| 231 | * concurrently marks its buffers dirty since they are | ||
| 232 | * only dirtied through routines in fs/buffer.c in | ||
| 233 | * which call sites of mark_buffer_dirty are protected | ||
| 234 | * by page lock. | ||
| 235 | */ | ||
| 236 | bh = head = page_buffers(page); | ||
| 237 | do { | ||
| 238 | /* Do not mark hole blocks dirty */ | ||
| 239 | if (buffer_dirty(bh) || !buffer_mapped(bh)) | ||
| 240 | continue; | ||
| 241 | |||
| 242 | set_buffer_dirty(bh); | ||
| 243 | nr_dirty++; | ||
| 244 | } while (bh = bh->b_this_page, bh != head); | ||
| 245 | |||
| 246 | if (nr_dirty) | ||
| 247 | nilfs_set_file_dirty(inode, nr_dirty); | ||
| 229 | } | 248 | } |
| 230 | return ret; | 249 | return ret; |
| 231 | } | 250 | } |
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 1c39efb71bab..2487116d0d33 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
| @@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
| 790 | &hole_size, &rec, &is_last); | 790 | &hole_size, &rec, &is_last); |
| 791 | if (ret) { | 791 | if (ret) { |
| 792 | mlog_errno(ret); | 792 | mlog_errno(ret); |
| 793 | goto out; | 793 | goto out_unlock; |
| 794 | } | 794 | } |
| 795 | 795 | ||
| 796 | if (rec.e_blkno == 0ULL) { | 796 | if (rec.e_blkno == 0ULL) { |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8a7509f9e6f5..ff54014a24ec 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
| @@ -2288,7 +2288,7 @@ relock: | |||
| 2288 | ret = ocfs2_inode_lock(inode, NULL, 1); | 2288 | ret = ocfs2_inode_lock(inode, NULL, 1); |
| 2289 | if (ret < 0) { | 2289 | if (ret < 0) { |
| 2290 | mlog_errno(ret); | 2290 | mlog_errno(ret); |
| 2291 | goto out_sems; | 2291 | goto out; |
| 2292 | } | 2292 | } |
| 2293 | 2293 | ||
| 2294 | ocfs2_inode_unlock(inode, 1); | 2294 | ocfs2_inode_unlock(inode, 1); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e96329ceb28c..e9ef6d6b51d5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -562,6 +562,9 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); | |||
| 562 | extern __printf(2, 3) | 562 | extern __printf(2, 3) |
| 563 | int __trace_printk(unsigned long ip, const char *fmt, ...); | 563 | int __trace_printk(unsigned long ip, const char *fmt, ...); |
| 564 | 564 | ||
| 565 | extern int __trace_bputs(unsigned long ip, const char *str); | ||
| 566 | extern int __trace_puts(unsigned long ip, const char *str, int size); | ||
| 567 | |||
| 565 | /** | 568 | /** |
| 566 | * trace_puts - write a string into the ftrace buffer | 569 | * trace_puts - write a string into the ftrace buffer |
| 567 | * @str: the string to record | 570 | * @str: the string to record |
| @@ -587,8 +590,6 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
| 587 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) | 590 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
| 588 | */ | 591 | */ |
| 589 | 592 | ||
| 590 | extern int __trace_bputs(unsigned long ip, const char *str); | ||
| 591 | extern int __trace_puts(unsigned long ip, const char *str, int size); | ||
| 592 | #define trace_puts(str) ({ \ | 593 | #define trace_puts(str) ({ \ |
| 593 | static const char *trace_printk_fmt \ | 594 | static const char *trace_printk_fmt \ |
| 594 | __attribute__((section("__trace_printk_fmt"))) = \ | 595 | __attribute__((section("__trace_printk_fmt"))) = \ |
diff --git a/include/linux/rio.h b/include/linux/rio.h index a3e784278667..18e099342e6f 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
| @@ -83,7 +83,6 @@ | |||
| 83 | 83 | ||
| 84 | extern struct bus_type rio_bus_type; | 84 | extern struct bus_type rio_bus_type; |
| 85 | extern struct device rio_bus; | 85 | extern struct device rio_bus; |
| 86 | extern struct list_head rio_devices; /* list of all devices */ | ||
| 87 | 86 | ||
| 88 | struct rio_mport; | 87 | struct rio_mport; |
| 89 | struct rio_dev; | 88 | struct rio_dev; |
| @@ -237,6 +236,7 @@ enum rio_phy_type { | |||
| 237 | * @name: Port name string | 236 | * @name: Port name string |
| 238 | * @priv: Master port private data | 237 | * @priv: Master port private data |
| 239 | * @dma: DMA device associated with mport | 238 | * @dma: DMA device associated with mport |
| 239 | * @nscan: RapidIO network enumeration/discovery operations | ||
| 240 | */ | 240 | */ |
| 241 | struct rio_mport { | 241 | struct rio_mport { |
| 242 | struct list_head dbells; /* list of doorbell events */ | 242 | struct list_head dbells; /* list of doorbell events */ |
| @@ -262,8 +262,14 @@ struct rio_mport { | |||
| 262 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | 262 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
| 263 | struct dma_device dma; | 263 | struct dma_device dma; |
| 264 | #endif | 264 | #endif |
| 265 | struct rio_scan *nscan; | ||
| 265 | }; | 266 | }; |
| 266 | 267 | ||
| 268 | /* | ||
| 269 | * Enumeration/discovery control flags | ||
| 270 | */ | ||
| 271 | #define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */ | ||
| 272 | |||
| 267 | struct rio_id_table { | 273 | struct rio_id_table { |
| 268 | u16 start; /* logical minimal id */ | 274 | u16 start; /* logical minimal id */ |
| 269 | u32 max; /* max number of IDs in table */ | 275 | u32 max; /* max number of IDs in table */ |
| @@ -460,6 +466,16 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev) | |||
| 460 | } | 466 | } |
| 461 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | 467 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ |
| 462 | 468 | ||
| 469 | /** | ||
| 470 | * struct rio_scan - RIO enumeration and discovery operations | ||
| 471 | * @enumerate: Callback to perform RapidIO fabric enumeration. | ||
| 472 | * @discover: Callback to perform RapidIO fabric discovery. | ||
| 473 | */ | ||
| 474 | struct rio_scan { | ||
| 475 | int (*enumerate)(struct rio_mport *mport, u32 flags); | ||
| 476 | int (*discover)(struct rio_mport *mport, u32 flags); | ||
| 477 | }; | ||
| 478 | |||
| 463 | /* Architecture and hardware-specific functions */ | 479 | /* Architecture and hardware-specific functions */ |
| 464 | extern int rio_register_mport(struct rio_mport *); | 480 | extern int rio_register_mport(struct rio_mport *); |
| 465 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); | 481 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); |
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index b75c05920ab5..5059994fe297 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h | |||
| @@ -433,5 +433,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port); | |||
| 433 | extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); | 433 | extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); |
| 434 | extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, | 434 | extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did, |
| 435 | struct rio_dev *from); | 435 | struct rio_dev *from); |
| 436 | extern int rio_init_mports(void); | ||
| 436 | 437 | ||
| 437 | #endif /* LINUX_RIO_DRV_H */ | 438 | #endif /* LINUX_RIO_DRV_H */ |
diff --git a/include/linux/wait.h b/include/linux/wait.h index ac38be2692d8..1133695eb067 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -217,6 +217,8 @@ do { \ | |||
| 217 | if (!ret) \ | 217 | if (!ret) \ |
| 218 | break; \ | 218 | break; \ |
| 219 | } \ | 219 | } \ |
| 220 | if (!ret && (condition)) \ | ||
| 221 | ret = 1; \ | ||
| 220 | finish_wait(&wq, &__wait); \ | 222 | finish_wait(&wq, &__wait); \ |
| 221 | } while (0) | 223 | } while (0) |
| 222 | 224 | ||
| @@ -233,8 +235,9 @@ do { \ | |||
| 233 | * wake_up() has to be called after changing any variable that could | 235 | * wake_up() has to be called after changing any variable that could |
| 234 | * change the result of the wait condition. | 236 | * change the result of the wait condition. |
| 235 | * | 237 | * |
| 236 | * The function returns 0 if the @timeout elapsed, and the remaining | 238 | * The function returns 0 if the @timeout elapsed, or the remaining |
| 237 | * jiffies if the condition evaluated to true before the timeout elapsed. | 239 | * jiffies (at least 1) if the @condition evaluated to %true before |
| 240 | * the @timeout elapsed. | ||
| 238 | */ | 241 | */ |
| 239 | #define wait_event_timeout(wq, condition, timeout) \ | 242 | #define wait_event_timeout(wq, condition, timeout) \ |
| 240 | ({ \ | 243 | ({ \ |
| @@ -302,6 +305,8 @@ do { \ | |||
| 302 | ret = -ERESTARTSYS; \ | 305 | ret = -ERESTARTSYS; \ |
| 303 | break; \ | 306 | break; \ |
| 304 | } \ | 307 | } \ |
| 308 | if (!ret && (condition)) \ | ||
| 309 | ret = 1; \ | ||
| 305 | finish_wait(&wq, &__wait); \ | 310 | finish_wait(&wq, &__wait); \ |
| 306 | } while (0) | 311 | } while (0) |
| 307 | 312 | ||
| @@ -318,9 +323,10 @@ do { \ | |||
| 318 | * wake_up() has to be called after changing any variable that could | 323 | * wake_up() has to be called after changing any variable that could |
| 319 | * change the result of the wait condition. | 324 | * change the result of the wait condition. |
| 320 | * | 325 | * |
| 321 | * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it | 326 | * Returns: |
| 322 | * was interrupted by a signal, and the remaining jiffies otherwise | 327 | * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by |
| 323 | * if the condition evaluated to true before the timeout elapsed. | 328 | * a signal, or the remaining jiffies (at least 1) if the @condition |
| 329 | * evaluated to %true before the @timeout elapsed. | ||
| 324 | */ | 330 | */ |
| 325 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ | 331 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ |
| 326 | ({ \ | 332 | ({ \ |
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 83a2970295d1..6bd4a90d1991 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
| @@ -1021,9 +1021,6 @@ static void audit_log_rule_change(char *action, struct audit_krule *rule, int re | |||
| 1021 | * @seq: netlink audit message sequence (serial) number | 1021 | * @seq: netlink audit message sequence (serial) number |
| 1022 | * @data: payload data | 1022 | * @data: payload data |
| 1023 | * @datasz: size of payload data | 1023 | * @datasz: size of payload data |
| 1024 | * @loginuid: loginuid of sender | ||
| 1025 | * @sessionid: sessionid for netlink audit message | ||
| 1026 | * @sid: SE Linux Security ID of sender | ||
| 1027 | */ | 1024 | */ |
| 1028 | int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz) | 1025 | int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz) |
| 1029 | { | 1026 | { |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 03a89a2f464b..362c329b83fe 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -2325,7 +2325,12 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
| 2325 | pte_unmap(pte); | 2325 | pte_unmap(pte); |
| 2326 | spin_lock(&mm->page_table_lock); | 2326 | spin_lock(&mm->page_table_lock); |
| 2327 | BUG_ON(!pmd_none(*pmd)); | 2327 | BUG_ON(!pmd_none(*pmd)); |
| 2328 | set_pmd_at(mm, address, pmd, _pmd); | 2328 | /* |
| 2329 | * We can only use set_pmd_at when establishing | ||
| 2330 | * hugepmds and never for establishing regular pmds that | ||
| 2331 | * points to regular pagetables. Use pmd_populate for that | ||
| 2332 | */ | ||
| 2333 | pmd_populate(mm, pmd, pmd_pgtable(_pmd)); | ||
| 2329 | spin_unlock(&mm->page_table_lock); | 2334 | spin_unlock(&mm->page_table_lock); |
| 2330 | anon_vma_unlock_write(vma->anon_vma); | 2335 | anon_vma_unlock_write(vma->anon_vma); |
| 2331 | goto out; | 2336 | goto out; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cb1c9dedf9b6..010d6c14129a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -4108,8 +4108,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype, | |||
| 4108 | if (mem_cgroup_disabled()) | 4108 | if (mem_cgroup_disabled()) |
| 4109 | return NULL; | 4109 | return NULL; |
| 4110 | 4110 | ||
| 4111 | VM_BUG_ON(PageSwapCache(page)); | ||
| 4112 | |||
| 4113 | if (PageTransHuge(page)) { | 4111 | if (PageTransHuge(page)) { |
| 4114 | nr_pages <<= compound_order(page); | 4112 | nr_pages <<= compound_order(page); |
| 4115 | VM_BUG_ON(!PageTransHuge(page)); | 4113 | VM_BUG_ON(!PageTransHuge(page)); |
| @@ -4205,6 +4203,18 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
| 4205 | if (page_mapped(page)) | 4203 | if (page_mapped(page)) |
| 4206 | return; | 4204 | return; |
| 4207 | VM_BUG_ON(page->mapping && !PageAnon(page)); | 4205 | VM_BUG_ON(page->mapping && !PageAnon(page)); |
| 4206 | /* | ||
| 4207 | * If the page is in swap cache, uncharge should be deferred | ||
| 4208 | * to the swap path, which also properly accounts swap usage | ||
| 4209 | * and handles memcg lifetime. | ||
| 4210 | * | ||
| 4211 | * Note that this check is not stable and reclaim may add the | ||
| 4212 | * page to swap cache at any time after this. However, if the | ||
| 4213 | * page is not in swap cache by the time page->mapcount hits | ||
| 4214 | * 0, there won't be any page table references to the swap | ||
| 4215 | * slot, and reclaim will free it and not actually write the | ||
| 4216 | * page to disk. | ||
| 4217 | */ | ||
| 4208 | if (PageSwapCache(page)) | 4218 | if (PageSwapCache(page)) |
| 4209 | return; | 4219 | return; |
| 4210 | __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); | 4220 | __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a221fac1f47d..1ad92b46753e 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -720,9 +720,12 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
| 720 | start = phys_start_pfn << PAGE_SHIFT; | 720 | start = phys_start_pfn << PAGE_SHIFT; |
| 721 | size = nr_pages * PAGE_SIZE; | 721 | size = nr_pages * PAGE_SIZE; |
| 722 | ret = release_mem_region_adjustable(&iomem_resource, start, size); | 722 | ret = release_mem_region_adjustable(&iomem_resource, start, size); |
| 723 | if (ret) | 723 | if (ret) { |
| 724 | pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n", | 724 | resource_size_t endres = start + size - 1; |
| 725 | start, start + size - 1, ret); | 725 | |
| 726 | pr_warn("Unable to release resource <%pa-%pa> (%d)\n", | ||
| 727 | &start, &endres, ret); | ||
| 728 | } | ||
| 726 | 729 | ||
| 727 | sections_to_remove = nr_pages / PAGES_PER_SECTION; | 730 | sections_to_remove = nr_pages / PAGES_PER_SECTION; |
| 728 | for (i = 0; i < sections_to_remove; i++) { | 731 | for (i = 0; i < sections_to_remove; i++) { |
diff --git a/mm/migrate.c b/mm/migrate.c index 27ed22579fd9..b1f57501de9c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | |||
| 165 | pte = arch_make_huge_pte(pte, vma, new, 0); | 165 | pte = arch_make_huge_pte(pte, vma, new, 0); |
| 166 | } | 166 | } |
| 167 | #endif | 167 | #endif |
| 168 | flush_cache_page(vma, addr, pte_pfn(pte)); | 168 | flush_dcache_page(new); |
| 169 | set_pte_at(mm, addr, ptep, pte); | 169 | set_pte_at(mm, addr, ptep, pte); |
| 170 | 170 | ||
| 171 | if (PageHuge(new)) { | 171 | if (PageHuge(new)) { |
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index be04122fb277..6725ff183374 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c | |||
| @@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm) | |||
| 40 | int id; | 40 | int id; |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * srcu_read_lock() here will block synchronize_srcu() in | 43 | * SRCU here will block mmu_notifier_unregister until |
| 44 | * mmu_notifier_unregister() until all registered | 44 | * ->release returns. |
| 45 | * ->release() callouts this function makes have | ||
| 46 | * returned. | ||
| 47 | */ | 45 | */ |
| 48 | id = srcu_read_lock(&srcu); | 46 | id = srcu_read_lock(&srcu); |
| 47 | hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) | ||
| 48 | /* | ||
| 49 | * If ->release runs before mmu_notifier_unregister it must be | ||
| 50 | * handled, as it's the only way for the driver to flush all | ||
| 51 | * existing sptes and stop the driver from establishing any more | ||
| 52 | * sptes before all the pages in the mm are freed. | ||
| 53 | */ | ||
| 54 | if (mn->ops->release) | ||
| 55 | mn->ops->release(mn, mm); | ||
| 56 | srcu_read_unlock(&srcu, id); | ||
| 57 | |||
| 49 | spin_lock(&mm->mmu_notifier_mm->lock); | 58 | spin_lock(&mm->mmu_notifier_mm->lock); |
| 50 | while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { | 59 | while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { |
| 51 | mn = hlist_entry(mm->mmu_notifier_mm->list.first, | 60 | mn = hlist_entry(mm->mmu_notifier_mm->list.first, |
| 52 | struct mmu_notifier, | 61 | struct mmu_notifier, |
| 53 | hlist); | 62 | hlist); |
| 54 | |||
| 55 | /* | 63 | /* |
| 56 | * Unlink. This will prevent mmu_notifier_unregister() | 64 | * We arrived before mmu_notifier_unregister so |
| 57 | * from also making the ->release() callout. | 65 | * mmu_notifier_unregister will do nothing other than to wait |
| 66 | * for ->release to finish and for mmu_notifier_unregister to | ||
| 67 | * return. | ||
| 58 | */ | 68 | */ |
| 59 | hlist_del_init_rcu(&mn->hlist); | 69 | hlist_del_init_rcu(&mn->hlist); |
| 60 | spin_unlock(&mm->mmu_notifier_mm->lock); | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Clear sptes. (see 'release' description in mmu_notifier.h) | ||
| 64 | */ | ||
| 65 | if (mn->ops->release) | ||
| 66 | mn->ops->release(mn, mm); | ||
| 67 | |||
| 68 | spin_lock(&mm->mmu_notifier_mm->lock); | ||
| 69 | } | 70 | } |
| 70 | spin_unlock(&mm->mmu_notifier_mm->lock); | 71 | spin_unlock(&mm->mmu_notifier_mm->lock); |
| 71 | 72 | ||
| 72 | /* | 73 | /* |
| 73 | * All callouts to ->release() which we have done are complete. | 74 | * synchronize_srcu here prevents mmu_notifier_release from returning to |
| 74 | * Allow synchronize_srcu() in mmu_notifier_unregister() to complete | 75 | * exit_mmap (which would proceed with freeing all pages in the mm) |
| 75 | */ | 76 | * until the ->release method returns, if it was invoked by |
| 76 | srcu_read_unlock(&srcu, id); | 77 | * mmu_notifier_unregister. |
| 77 | 78 | * | |
| 78 | /* | 79 | * The mmu_notifier_mm can't go away from under us because one mm_count |
| 79 | * mmu_notifier_unregister() may have unlinked a notifier and may | 80 | * is held by exit_mmap. |
| 80 | * still be calling out to it. Additionally, other notifiers | ||
| 81 | * may have been active via vmtruncate() et. al. Block here | ||
| 82 | * to ensure that all notifier callouts for this mm have been | ||
| 83 | * completed and the sptes are really cleaned up before returning | ||
| 84 | * to exit_mmap(). | ||
| 85 | */ | 81 | */ |
| 86 | synchronize_srcu(&srcu); | 82 | synchronize_srcu(&srcu); |
| 87 | } | 83 | } |
| @@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) | |||
| 292 | { | 288 | { |
| 293 | BUG_ON(atomic_read(&mm->mm_count) <= 0); | 289 | BUG_ON(atomic_read(&mm->mm_count) <= 0); |
| 294 | 290 | ||
| 295 | spin_lock(&mm->mmu_notifier_mm->lock); | ||
| 296 | if (!hlist_unhashed(&mn->hlist)) { | 291 | if (!hlist_unhashed(&mn->hlist)) { |
| 292 | /* | ||
| 293 | * SRCU here will force exit_mmap to wait for ->release to | ||
| 294 | * finish before freeing the pages. | ||
| 295 | */ | ||
| 297 | int id; | 296 | int id; |
| 298 | 297 | ||
| 298 | id = srcu_read_lock(&srcu); | ||
| 299 | /* | 299 | /* |
| 300 | * Ensure we synchronize up with __mmu_notifier_release(). | 300 | * exit_mmap will block in mmu_notifier_release to guarantee |
| 301 | * that ->release is called before freeing the pages. | ||
| 301 | */ | 302 | */ |
| 302 | id = srcu_read_lock(&srcu); | ||
| 303 | |||
| 304 | hlist_del_rcu(&mn->hlist); | ||
| 305 | spin_unlock(&mm->mmu_notifier_mm->lock); | ||
| 306 | |||
| 307 | if (mn->ops->release) | 303 | if (mn->ops->release) |
| 308 | mn->ops->release(mn, mm); | 304 | mn->ops->release(mn, mm); |
| 305 | srcu_read_unlock(&srcu, id); | ||
| 309 | 306 | ||
| 307 | spin_lock(&mm->mmu_notifier_mm->lock); | ||
| 310 | /* | 308 | /* |
| 311 | * Allow __mmu_notifier_release() to complete. | 309 | * Can not use list_del_rcu() since __mmu_notifier_release |
| 310 | * can delete it before we hold the lock. | ||
| 312 | */ | 311 | */ |
| 313 | srcu_read_unlock(&srcu, id); | 312 | hlist_del_init_rcu(&mn->hlist); |
| 314 | } else | ||
| 315 | spin_unlock(&mm->mmu_notifier_mm->lock); | 313 | spin_unlock(&mm->mmu_notifier_mm->lock); |
| 314 | } | ||
| 316 | 315 | ||
| 317 | /* | 316 | /* |
| 318 | * Wait for any running method to finish, including ->release() if it | 317 | * Wait for any running method to finish, of course including |
| 319 | * was run by __mmu_notifier_release() instead of us. | 318 | * ->release if it was run by mmu_notifier_relase instead of us. |
| 320 | */ | 319 | */ |
| 321 | synchronize_srcu(&srcu); | 320 | synchronize_srcu(&srcu); |
| 322 | 321 | ||
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 35aa294656cd..5da2cbcfdbb5 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
| @@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma, | |||
| 127 | return 0; | 127 | return 0; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) | ||
| 131 | { | ||
| 132 | struct vm_area_struct *vma; | ||
| 133 | |||
| 134 | /* We don't need vma lookup at all. */ | ||
| 135 | if (!walk->hugetlb_entry) | ||
| 136 | return NULL; | ||
| 137 | |||
| 138 | VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); | ||
| 139 | vma = find_vma(walk->mm, addr); | ||
| 140 | if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) | ||
| 141 | return vma; | ||
| 142 | |||
| 143 | return NULL; | ||
| 144 | } | ||
| 145 | |||
| 146 | #else /* CONFIG_HUGETLB_PAGE */ | 130 | #else /* CONFIG_HUGETLB_PAGE */ |
| 147 | static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk) | ||
| 148 | { | ||
| 149 | return NULL; | ||
| 150 | } | ||
| 151 | |||
| 152 | static int walk_hugetlb_range(struct vm_area_struct *vma, | 131 | static int walk_hugetlb_range(struct vm_area_struct *vma, |
| 153 | unsigned long addr, unsigned long end, | 132 | unsigned long addr, unsigned long end, |
| 154 | struct mm_walk *walk) | 133 | struct mm_walk *walk) |
| @@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
| 198 | if (!walk->mm) | 177 | if (!walk->mm) |
| 199 | return -EINVAL; | 178 | return -EINVAL; |
| 200 | 179 | ||
| 180 | VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem)); | ||
| 181 | |||
| 201 | pgd = pgd_offset(walk->mm, addr); | 182 | pgd = pgd_offset(walk->mm, addr); |
| 202 | do { | 183 | do { |
| 203 | struct vm_area_struct *vma; | 184 | struct vm_area_struct *vma = NULL; |
| 204 | 185 | ||
| 205 | next = pgd_addr_end(addr, end); | 186 | next = pgd_addr_end(addr, end); |
| 206 | 187 | ||
| 207 | /* | 188 | /* |
| 208 | * handle hugetlb vma individually because pagetable walk for | 189 | * This function was not intended to be vma based. |
| 209 | * the hugetlb page is dependent on the architecture and | 190 | * But there are vma special cases to be handled: |
| 210 | * we can't handled it in the same manner as non-huge pages. | 191 | * - hugetlb vma's |
| 192 | * - VM_PFNMAP vma's | ||
| 211 | */ | 193 | */ |
| 212 | vma = hugetlb_vma(addr, walk); | 194 | vma = find_vma(walk->mm, addr); |
| 213 | if (vma) { | 195 | if (vma) { |
| 214 | if (vma->vm_end < next) | 196 | /* |
| 197 | * There are no page structures backing a VM_PFNMAP | ||
| 198 | * range, so do not allow split_huge_page_pmd(). | ||
| 199 | */ | ||
| 200 | if ((vma->vm_start <= addr) && | ||
| 201 | (vma->vm_flags & VM_PFNMAP)) { | ||
| 215 | next = vma->vm_end; | 202 | next = vma->vm_end; |
| 203 | pgd = pgd_offset(walk->mm, next); | ||
| 204 | continue; | ||
| 205 | } | ||
| 216 | /* | 206 | /* |
| 217 | * Hugepage is very tightly coupled with vma, so | 207 | * Handle hugetlb vma individually because pagetable |
| 218 | * walk through hugetlb entries within a given vma. | 208 | * walk for the hugetlb page is dependent on the |
| 209 | * architecture and we can't handled it in the same | ||
| 210 | * manner as non-huge pages. | ||
| 219 | */ | 211 | */ |
| 220 | err = walk_hugetlb_range(vma, addr, next, walk); | 212 | if (walk->hugetlb_entry && (vma->vm_start <= addr) && |
| 221 | if (err) | 213 | is_vm_hugetlb_page(vma)) { |
| 222 | break; | 214 | if (vma->vm_end < next) |
| 223 | pgd = pgd_offset(walk->mm, next); | 215 | next = vma->vm_end; |
| 224 | continue; | 216 | /* |
| 217 | * Hugepage is very tightly coupled with vma, | ||
| 218 | * so walk through hugetlb entries within a | ||
| 219 | * given vma. | ||
| 220 | */ | ||
| 221 | err = walk_hugetlb_range(vma, addr, next, walk); | ||
| 222 | if (err) | ||
| 223 | break; | ||
| 224 | pgd = pgd_offset(walk->mm, next); | ||
| 225 | continue; | ||
| 226 | } | ||
| 225 | } | 227 | } |
| 226 | 228 | ||
| 227 | if (pgd_none_or_clear_bad(pgd)) { | 229 | if (pgd_none_or_clear_bad(pgd)) { |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index d4abc59ce1d9..0a63658065f0 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
| @@ -6,7 +6,6 @@ TARGETS += memory-hotplug | |||
| 6 | TARGETS += mqueue | 6 | TARGETS += mqueue |
| 7 | TARGETS += net | 7 | TARGETS += net |
| 8 | TARGETS += ptrace | 8 | TARGETS += ptrace |
| 9 | TARGETS += soft-dirty | ||
| 10 | TARGETS += vm | 9 | TARGETS += vm |
| 11 | 10 | ||
| 12 | all: | 11 | all: |
diff --git a/tools/testing/selftests/soft-dirty/Makefile b/tools/testing/selftests/soft-dirty/Makefile deleted file mode 100644 index a9cdc823d6e0..000000000000 --- a/tools/testing/selftests/soft-dirty/Makefile +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | CFLAGS += -iquote../../../../include/uapi -Wall | ||
| 2 | soft-dirty: soft-dirty.c | ||
| 3 | |||
| 4 | all: soft-dirty | ||
| 5 | |||
| 6 | clean: | ||
| 7 | rm -f soft-dirty | ||
| 8 | |||
| 9 | run_tests: all | ||
| 10 | @./soft-dirty || echo "soft-dirty selftests: [FAIL]" | ||
diff --git a/tools/testing/selftests/soft-dirty/soft-dirty.c b/tools/testing/selftests/soft-dirty/soft-dirty.c deleted file mode 100644 index aba4f87f87f0..000000000000 --- a/tools/testing/selftests/soft-dirty/soft-dirty.c +++ /dev/null | |||
| @@ -1,114 +0,0 @@ | |||
| 1 | #include <stdlib.h> | ||
| 2 | #include <stdio.h> | ||
| 3 | #include <sys/mman.h> | ||
| 4 | #include <unistd.h> | ||
| 5 | #include <fcntl.h> | ||
| 6 | #include <sys/types.h> | ||
| 7 | |||
| 8 | typedef unsigned long long u64; | ||
| 9 | |||
| 10 | #define PME_PRESENT (1ULL << 63) | ||
| 11 | #define PME_SOFT_DIRTY (1Ull << 55) | ||
| 12 | |||
| 13 | #define PAGES_TO_TEST 3 | ||
| 14 | #ifndef PAGE_SIZE | ||
| 15 | #define PAGE_SIZE 4096 | ||
| 16 | #endif | ||
| 17 | |||
| 18 | static void get_pagemap2(char *mem, u64 *map) | ||
| 19 | { | ||
| 20 | int fd; | ||
| 21 | |||
| 22 | fd = open("/proc/self/pagemap2", O_RDONLY); | ||
| 23 | if (fd < 0) { | ||
| 24 | perror("Can't open pagemap2"); | ||
| 25 | exit(1); | ||
| 26 | } | ||
| 27 | |||
| 28 | lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET); | ||
| 29 | read(fd, map, sizeof(u64) * PAGES_TO_TEST); | ||
| 30 | close(fd); | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline char map_p(u64 map) | ||
| 34 | { | ||
| 35 | return map & PME_PRESENT ? 'p' : '-'; | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline char map_sd(u64 map) | ||
| 39 | { | ||
| 40 | return map & PME_SOFT_DIRTY ? 'd' : '-'; | ||
| 41 | } | ||
| 42 | |||
| 43 | static int check_pte(int step, int page, u64 *map, u64 want) | ||
| 44 | { | ||
| 45 | if ((map[page] & want) != want) { | ||
| 46 | printf("Step %d Page %d has %c%c, want %c%c\n", | ||
| 47 | step, page, | ||
| 48 | map_p(map[page]), map_sd(map[page]), | ||
| 49 | map_p(want), map_sd(want)); | ||
| 50 | return 1; | ||
| 51 | } | ||
| 52 | |||
| 53 | return 0; | ||
| 54 | } | ||
| 55 | |||
| 56 | static void clear_refs(void) | ||
| 57 | { | ||
| 58 | int fd; | ||
| 59 | char *v = "4"; | ||
| 60 | |||
| 61 | fd = open("/proc/self/clear_refs", O_WRONLY); | ||
| 62 | if (write(fd, v, 3) < 3) { | ||
| 63 | perror("Can't clear soft-dirty bit"); | ||
| 64 | exit(1); | ||
| 65 | } | ||
| 66 | close(fd); | ||
| 67 | } | ||
| 68 | |||
| 69 | int main(void) | ||
| 70 | { | ||
| 71 | char *mem, x; | ||
| 72 | u64 map[PAGES_TO_TEST]; | ||
| 73 | |||
| 74 | mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE, | ||
| 75 | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); | ||
| 76 | |||
| 77 | x = mem[0]; | ||
| 78 | mem[2 * PAGE_SIZE] = 'c'; | ||
| 79 | get_pagemap2(mem, map); | ||
| 80 | |||
| 81 | if (check_pte(1, 0, map, PME_PRESENT)) | ||
| 82 | return 1; | ||
| 83 | if (check_pte(1, 1, map, 0)) | ||
| 84 | return 1; | ||
| 85 | if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY)) | ||
| 86 | return 1; | ||
| 87 | |||
| 88 | clear_refs(); | ||
| 89 | get_pagemap2(mem, map); | ||
| 90 | |||
| 91 | if (check_pte(2, 0, map, PME_PRESENT)) | ||
| 92 | return 1; | ||
| 93 | if (check_pte(2, 1, map, 0)) | ||
| 94 | return 1; | ||
| 95 | if (check_pte(2, 2, map, PME_PRESENT)) | ||
| 96 | return 1; | ||
| 97 | |||
| 98 | mem[0] = 'a'; | ||
| 99 | mem[PAGE_SIZE] = 'b'; | ||
| 100 | x = mem[2 * PAGE_SIZE]; | ||
| 101 | get_pagemap2(mem, map); | ||
| 102 | |||
| 103 | if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY)) | ||
| 104 | return 1; | ||
| 105 | if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY)) | ||
| 106 | return 1; | ||
| 107 | if (check_pte(3, 2, map, PME_PRESENT)) | ||
| 108 | return 1; | ||
| 109 | |||
| 110 | (void)x; /* gcc warn */ | ||
| 111 | |||
| 112 | printf("PASS\n"); | ||
| 113 | return 0; | ||
| 114 | } | ||
