Diffstat (limited to 'drivers')
 43 files changed, 3857 insertions(+), 2197 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb2119a..9d9490e22e07 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
+#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -60,7 +64,12 @@
 
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
                        size = 512;
                }
                size += 4; /* add in BIOS popup space */
-       } else if (IS_G33) {
+       } else if (IS_G33 && !IS_IGD) {
                /* G33's GTT size defined in gmch_ctrl */
                switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
                case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
                        size = 512;
                }
                size += 4;
-       } else if (IS_G4X) {
+       } else if (IS_G4X || IS_IGD) {
                /* On 4 series hardware, GTT stolen is separate from graphics
                 * stolen, ignore it in stolen gtt entries counting. However,
                 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
                NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
                NULL, &intel_g33_driver },
+       { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+               NULL, &intel_g33_driver },
+       { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+               NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
                "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_82945G_HB),
        ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
        ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
        ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
        ID(PCI_DEVICE_ID_INTEL_82G35_HB),
        ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
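Note: the intel-agp hunks above wire a new chipset into three tables: the PCI device-ID defines, the driver-description table, and the probe table. Below is a minimal user-space sketch of the same lookup pattern; the struct layout and lookup() helper are illustrative stand-ins, only the IDs come from the patch.

#include <stddef.h>
#include <stdio.h>

/* IDs taken from the patch; everything else here is illustrative. */
#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
#define PCI_DEVICE_ID_INTEL_IGDG_HB  0xA000
#define PCI_DEVICE_ID_INTEL_IGDG_IG  0xA001

struct chipset_entry {
        unsigned short hb_id;  /* host bridge PCI device ID */
        unsigned short ig_id;  /* integrated graphics PCI device ID */
        const char *name;
};

static const struct chipset_entry table[] = {
        { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, "IGD" },
        { PCI_DEVICE_ID_INTEL_IGDG_HB,  PCI_DEVICE_ID_INTEL_IGDG_IG,  "IGD" },
};

/* Probe walks the table comparing host bridge IDs, as the driver does. */
static const struct chipset_entry *lookup(unsigned short hb_id)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].hb_id == hb_id)
                        return &table[i];
        return NULL;
}

int main(void)
{
        const struct chipset_entry *e = lookup(PCI_DEVICE_ID_INTEL_IGDGM_HB);

        if (e != NULL)
                printf("matched \"%s\" (IG device 0x%04X)\n", e->name, e->ig_id);
        return 0;
}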
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a665aa8..8b8c8c22f0fc 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@ static int descriptor_count;
 #define BIB_CMC ((1) << 30)
 #define BIB_IMC ((1) << 31)
 
-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
 {
        struct fw_descriptor *desc;
        static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
        return config_rom;
 }
 
-static void
-update_config_roms(void)
+static void update_config_roms(void)
 {
        struct fw_card *card;
        u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
        }
 }
 
-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
        size_t i;
 
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
        return 0;
 }
 
-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
 {
        mutex_lock(&card_mutex);
 
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
        mutex_unlock(&card_mutex);
 }
 
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+       fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+       return 0;
+}
+
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+       int channel, bandwidth = 0;
+
+       fw_iso_resource_manage(card, generation, 1ULL << 31,
+                              &channel, &bandwidth, true);
+       if (channel == 31) {
+               card->broadcast_channel_allocated = true;
+               device_for_each_child(card->device, (void *)(long)generation,
+                                     set_broadcast_channel);
+       }
+}
+
 static const char gap_count_table[] = {
        63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
 };
 
-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 {
        int scheduled;
 
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
        fw_card_put(card);
 }
 
-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
 {
        struct fw_card *card = container_of(work, struct fw_card, work.work);
        struct fw_device *root_device;
-       struct fw_node *root_node, *local_node;
+       struct fw_node *root_node;
        unsigned long flags;
-       int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
+       int root_id, new_root_id, irm_id, local_id;
+       int gap_count, generation, grace, rcode;
        bool do_reset = false;
        bool root_device_is_running;
        bool root_device_is_cmc;
        __be32 lock_data[2];
 
        spin_lock_irqsave(&card->lock, flags);
-       local_node = card->local_node;
-       root_node = card->root_node;
 
-       if (local_node == NULL) {
+       if (card->local_node == NULL) {
                spin_unlock_irqrestore(&card->lock, flags);
                goto out_put_card;
        }
-       fw_node_get(local_node);
-       fw_node_get(root_node);
 
        generation = card->generation;
+       root_node = card->root_node;
+       fw_node_get(root_node);
        root_device = root_node->data;
        root_device_is_running = root_device &&
                        atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
        root_device_is_cmc = root_device && root_device->cmc;
        root_id = root_node->node_id;
-       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+       irm_id = card->irm_node->node_id;
+       local_id = card->local_node->node_id;
+
+       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
        if (is_next_generation(generation, card->bm_generation) ||
            (card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
                 * next generation.
                 */
 
-               irm_id = card->irm_node->node_id;
                if (!card->irm_node->link_on) {
-                       new_root_id = local_node->node_id;
+                       new_root_id = local_id;
                        fw_notify("IRM has link off, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
                }
 
                lock_data[0] = cpu_to_be32(0x3f);
-               lock_data[1] = cpu_to_be32(local_node->node_id);
+               lock_data[1] = cpu_to_be32(local_id);
 
                spin_unlock_irqrestore(&card->lock, flags);
 
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
                        goto out;
 
                if (rcode == RCODE_COMPLETE &&
-                   lock_data[0] != cpu_to_be32(0x3f))
-                       /* Somebody else is BM, let them do the work. */
+                   lock_data[0] != cpu_to_be32(0x3f)) {
+
+                       /* Somebody else is BM.  Only act as IRM. */
+                       if (local_id == irm_id)
+                               allocate_broadcast_channel(card, generation);
+
                        goto out;
+               }
 
                spin_lock_irqsave(&card->lock, flags);
 
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
                         * do a bus reset and pick the local node as
                         * root, and thus, IRM.
                         */
-                       new_root_id = local_node->node_id;
+                       new_root_id = local_id;
                        fw_notify("BM lock failed, making local node (%02x) root.\n",
                                  new_root_id);
                        goto pick_me;
                }
        } else if (card->bm_generation != generation) {
                /*
-                * OK, we weren't BM in the last generation, and it's
-                * less than 100ms since last bus reset. Reschedule
-                * this task 100ms from now.
+                * We weren't BM in the last generation, and the last
+                * bus reset is less than 125ms ago.  Reschedule this job.
                 */
                spin_unlock_irqrestore(&card->lock, flags);
-               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
+               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
                goto out;
        }
 
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
                 * Either link_on is false, or we failed to read the
                 * config rom.  In either case, pick another root.
                 */
-               new_root_id = local_node->node_id;
+               new_root_id = local_id;
        } else if (!root_device_is_running) {
                /*
                 * If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
                 * successfully read the config rom, but it's not
                 * cycle master capable.
                 */
-               new_root_id = local_node->node_id;
+               new_root_id = local_id;
        }
 
  pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
                          card->index, new_root_id, gap_count);
                fw_send_phy_config(card, new_root_id, generation, gap_count);
                fw_core_initiate_bus_reset(card, 1);
+               /* Will allocate broadcast channel after the reset. */
+       } else {
+               if (local_id == irm_id)
+                       allocate_broadcast_channel(card, generation);
        }
+
  out:
        fw_node_put(root_node);
-       fw_node_put(local_node);
  out_put_card:
        fw_card_put(card);
 }
 
-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
 {
        struct fw_card *card = (struct fw_card *)data;
 
        fw_flush_transactions(card);
 }
 
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
-                  struct device *device)
+void fw_card_initialize(struct fw_card *card,
+                       const struct fw_card_driver *driver,
+                       struct device *device)
 {
        static atomic_t index = ATOMIC_INIT(-1);
 
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
 }
 EXPORT_SYMBOL(fw_card_initialize);
 
-int
-fw_card_add(struct fw_card *card,
-           u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+               u32 max_receive, u32 link_speed, u64 guid)
 {
        u32 *config_rom;
        size_t length;
-       int err;
+       int ret;
 
        card->max_receive = max_receive;
        card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
        list_add_tail(&card->link, &card_list);
        mutex_unlock(&card_mutex);
 
-       err = card->driver->enable(card, config_rom, length);
-       if (err < 0) {
+       ret = card->driver->enable(card, config_rom, length);
+       if (ret < 0) {
                mutex_lock(&card_mutex);
                list_del(&card->link);
                mutex_unlock(&card_mutex);
        }
-       return err;
+
+       return ret;
 }
 EXPORT_SYMBOL(fw_card_add);
 
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
  * dummy driver just fails all IO.
  */
 
-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
 {
        BUG();
        return -1;
 }
 
-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
-                    int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+                               int clear_bits, int set_bits)
 {
        return -ENODEV;
 }
 
-static int
-dummy_set_config_rom(struct fw_card *card,
-                    u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+                               u32 *config_rom, size_t length)
 {
        /*
         * We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
        return -1;
 }
 
-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
 {
        packet->callback(packet, card, -ENODEV);
 }
 
-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
 {
        packet->callback(packet, card, -ENODEV);
 }
 
-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 {
        return -ENOENT;
 }
 
-static int
-dummy_enable_phys_dma(struct fw_card *card,
-                     int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+                                int node_id, int generation)
 {
        return -ENODEV;
 }
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
        .enable_phys_dma = dummy_enable_phys_dma,
 };
 
-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
 {
        struct fw_card *card = container_of(kref, struct fw_card, kref);
 
        complete(&card->done);
 }
 
-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
 {
        card->driver->update_phy_reg(card, 4,
                                     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
 }
 EXPORT_SYMBOL(fw_core_remove_card);
 
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
        int reg = short_reset ? 5 : 1;
        int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
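Note: the new allocate_broadcast_channel() asks the isochronous resource manager for channel 31, the IEEE 1394 broadcast channel, and records success only when exactly that channel is granted. A user-space model of just that decision logic follows; manage() is a stand-in for fw_iso_resource_manage() and the card struct is simplified, so everything below is illustrative rather than the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define BROADCAST_CHANNEL 31

struct card_model {
        bool broadcast_channel_allocated;
        int local_id;
        int irm_id;
};

/* Stand-in for fw_iso_resource_manage(): try to allocate the channel
 * requested by the bit mask; report which channel was actually granted. */
static void manage(unsigned long long channel_mask, int *channel)
{
        if (channel_mask & (1ULL << BROADCAST_CHANNEL))
                *channel = BROADCAST_CHANNEL;  /* assume the IRM grants it */
        else
                *channel = -1;
}

static void allocate_broadcast_channel(struct card_model *card)
{
        int channel;

        manage(1ULL << BROADCAST_CHANNEL, &channel);
        if (channel == BROADCAST_CHANNEL)
                card->broadcast_channel_allocated = true;
}

int main(void)
{
        struct card_model card = { .local_id = 3, .irm_id = 3 };

        /* As in the patch: only the node that is IRM does the allocation. */
        if (card.local_id == card.irm_id)
                allocate_broadcast_channel(&card);

        printf("broadcast channel allocated: %s\n",
               card.broadcast_channel_allocated ? "yes" : "no");
        return 0;
}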
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/preempt.h>
+#include <linux/spinlock.h>
 #include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
 #include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
+
+struct client {
+       u32 version;
+       struct fw_device *device;
+
+       spinlock_t lock;
+       bool in_shutdown;
+       struct idr resource_idr;
+       struct list_head event_list;
+       wait_queue_head_t wait;
+       u64 bus_reset_closure;
+
+       struct fw_iso_context *iso_context;
+       u64 iso_closure;
+       struct fw_iso_buffer buffer;
+       unsigned long vm_start;
 
-struct client;
-struct client_resource {
        struct list_head link;
-       void (*release)(struct client *client, struct client_resource *r);
-       u32 handle;
+       struct kref kref;
 };
 
+static inline void client_get(struct client *client)
+{
+       kref_get(&client->kref);
+}
+
+static void client_release(struct kref *kref)
+{
+       struct client *client = container_of(kref, struct client, kref);
+
+       fw_device_put(client->device);
+       kfree(client);
+}
+
+static void client_put(struct client *client)
+{
+       kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+                                            struct client_resource *);
+struct client_resource {
+       client_resource_release_fn_t release;
+       int handle;
+};
+
+struct address_handler_resource {
+       struct client_resource resource;
+       struct fw_address_handler handler;
+       __u64 closure;
+       struct client *client;
+};
+
+struct outbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_request *request;
+       void *data;
+       size_t length;
+};
+
+struct descriptor_resource {
+       struct client_resource resource;
+       struct fw_descriptor descriptor;
+       u32 data[0];
+};
+
+struct iso_resource {
+       struct client_resource resource;
+       struct client *client;
+       /* Schedule work and access todo only with client->lock held. */
+       struct delayed_work work;
+       enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+             ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+       int generation;
+       u64 channels;
+       s32 bandwidth;
+       struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct.
+ * the first field in a struct XYZ_event.
  */
-
 struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
 };
 
-struct bus_reset {
+struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
 };
 
-struct response {
+struct outbound_transaction_event {
        struct event event;
-       struct fw_transaction transaction;
        struct client *client;
-       struct client_resource resource;
+       struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
 };
 
-struct iso_interrupt {
+struct inbound_transaction_event {
        struct event event;
-       struct fw_cdev_event_iso_interrupt interrupt;
+       struct fw_cdev_event_request request;
 };
 
-struct client {
-       u32 version;
-       struct fw_device *device;
-       spinlock_t lock;
-       u32 resource_handle;
-       struct list_head resource_list;
-       struct list_head event_list;
-       wait_queue_head_t wait;
-       u64 bus_reset_closure;
-
-       struct fw_iso_context *iso_context;
-       u64 iso_closure;
-       struct fw_iso_buffer buffer;
-       unsigned long vm_start;
+struct iso_interrupt_event {
+       struct event event;
+       struct fw_cdev_event_iso_interrupt interrupt;
+};
 
-       struct list_head link;
+struct iso_resource_event {
+       struct event event;
+       struct fw_cdev_event_iso_resource resource;
 };
 
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void __user *u64_to_uptr(__u64 value)
 {
        return (void __user *)(unsigned long)value;
 }
 
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
 {
        return (__u64)(unsigned long)ptr;
 }
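Note: the hunk above makes struct client reference-counted. client_get()/client_put() wrap kref_get()/kref_put(), and the client is freed only once the file handle, every idr entry, and every in-flight transaction callback have dropped their references. A self-contained model of that lifetime rule, using C11 atomics in place of struct kref (names and counts here are illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct client {
        atomic_int refcount;
};

static struct client *client_new(void)
{
        struct client *c = malloc(sizeof(*c));

        atomic_init(&c->refcount, 1);   /* the file handle's reference */
        return c;
}

static void client_get(struct client *c)
{
        atomic_fetch_add(&c->refcount, 1);
}

static void client_put(struct client *c)
{
        /* fetch_sub returns the old value: 1 means we dropped the last ref */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("last reference dropped, freeing client\n");
                free(c);
        }
}

int main(void)
{
        struct client *c = client_new();

        client_get(c);   /* e.g. a resource registered in the idr */
        client_put(c);   /* that resource released */
        client_put(c);   /* file closed: frees the client */
        return 0;
}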
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 {
        struct fw_device *device;
        struct client *client;
-       unsigned long flags;
 
        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
        }
 
        client->device = device;
-       INIT_LIST_HEAD(&client->event_list);
-       INIT_LIST_HEAD(&client->resource_list);
        spin_lock_init(&client->lock);
+       idr_init(&client->resource_idr);
+       INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
+       kref_init(&client->kref);
 
        file->private_data = client;
 
-       spin_lock_irqsave(&device->card->lock, flags);
+       mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
-       spin_unlock_irqrestore(&device->card->lock, flags);
+       mutex_unlock(&device->client_list_mutex);
 
        return 0;
 }
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
        event->v[1].size = size1;
 
        spin_lock_irqsave(&client->lock, flags);
-       list_add_tail(&event->link, &client->event_list);
+       if (client->in_shutdown)
+               kfree(event);
+       else
+               list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);
 
        wake_up_interruptible(&client->wait);
 }
 
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+                        char __user *buffer, size_t count)
 {
-       unsigned long flags;
        struct event *event;
        size_t size, total;
-       int i, retval;
+       int i, ret;
 
-       retval = wait_event_interruptible(client->wait,
+       ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
-       if (retval < 0)
-               return retval;
+       if (ret < 0)
+               return ret;
 
        if (list_empty(&client->event_list) &&
            fw_device_is_shutdown(client->device))
                return -ENODEV;
 
-       spin_lock_irqsave(&client->lock, flags);
-       event = container_of(client->event_list.next, struct event, link);
+       spin_lock_irq(&client->lock);
+       event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
-       spin_unlock_irqrestore(&client->lock, flags);
+       spin_unlock_irq(&client->lock);
 
        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
-                       retval = -EFAULT;
+                       ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
-       retval = total;
+       ret = total;
 
  out:
        kfree(event);
 
-       return retval;
+       return ret;
 }
 
-static ssize_t
-fw_device_op_read(struct file *file,
-                 char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
 {
        struct client *client = file->private_data;
 
        return dequeue_event(client, buffer, count);
 }
 
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-                    struct client *client)
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
 {
        struct fw_card *card = client->device->card;
 
+       spin_lock_irq(&card->lock);
+
        event->closure = client->bus_reset_closure;
        event->type = FW_CDEV_EVENT_BUS_RESET;
        event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
        event->bm_node_id = 0; /* FIXME: We don't track the BM. */
        event->irm_node_id = card->irm_node->node_id;
        event->root_node_id = card->root_node->node_id;
+
+       spin_unlock_irq(&card->lock);
 }
 
-static void
-for_each_client(struct fw_device *device,
-               void (*callback)(struct client *client))
+static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
 {
-       struct fw_card *card = device->card;
        struct client *c;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
 
+       mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
+       mutex_unlock(&device->client_list_mutex);
+}
+
+static int schedule_reallocations(int id, void *p, void *data)
+{
+       struct client_resource *r = p;
 
-       spin_unlock_irqrestore(&card->lock, flags);
+       if (r->release == release_iso_resource)
+               schedule_iso_resource(container_of(r,
+                                       struct iso_resource, resource));
+       return 0;
 }
 
-static void
-queue_bus_reset_event(struct client *client)
+static void queue_bus_reset_event(struct client *client)
 {
-       struct bus_reset *bus_reset;
+       struct bus_reset_event *e;
 
-       bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
-       if (bus_reset == NULL) {
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (e == NULL) {
                fw_notify("Out of memory when allocating bus reset event\n");
                return;
        }
 
-       fill_bus_reset_event(&bus_reset->reset, client);
+       fill_bus_reset_event(&e->reset, client);
+
+       queue_event(client, &e->event,
+                   &e->reset, sizeof(e->reset), NULL, 0);
 
-       queue_event(client, &bus_reset->event,
-                   &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+       spin_lock_irq(&client->lock);
+       idr_for_each(&client->resource_idr, schedule_reallocations, client);
+       spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
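Note: queue_bus_reset_event() now walks the client's resource idr with idr_for_each() and reschedules only iso resources, recognizing them by comparing the resource's release function pointer; the release pointer doubles as a type tag. A small stand-alone model of that trick, with an array standing in for the idr (all names below are illustrative):

#include <stdio.h>

struct resource {
        void (*release)(struct resource *);
        int handle;
};

static void release_iso_resource(struct resource *r)
{
        printf("iso resource %d released\n", r->handle);
}

static void release_transaction(struct resource *r)
{
        printf("transaction %d released\n", r->handle);
}

/* Model of the idr_for_each() callback: the release function pointer
 * identifies the resource type, so only iso resources are rescheduled. */
static int schedule_reallocations(struct resource *r)
{
        if (r->release == release_iso_resource)
                printf("rescheduling iso resource %d\n", r->handle);
        return 0;
}

int main(void)
{
        struct resource table[] = {
                { release_iso_resource, 1 },
                { release_transaction,  2 },
                { release_iso_resource, 3 },
        };

        /* The kernel iterates the idr radix tree; an array stands in here. */
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                schedule_reallocations(&table[i]);
        return 0;
}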
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
 {
        struct fw_cdev_get_info *get_info = buffer;
        struct fw_cdev_event_bus_reset bus_reset;
-       struct fw_card *card = client->device->card;
        unsigned long ret = 0;
 
        client->version = get_info->version;
        get_info->version = FW_CDEV_VERSION;
+       get_info->card = client->device->card->index;
 
        down_read(&fw_device_rwsem);
 
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
        client->bus_reset_closure = get_info->bus_reset_closure;
        if (get_info->bus_reset != 0) {
                void __user *uptr = u64_to_uptr(get_info->bus_reset);
-               unsigned long flags;
 
-               spin_lock_irqsave(&card->lock, flags);
                fill_bus_reset_event(&bus_reset, client);
-               spin_unlock_irqrestore(&card->lock, flags);
-
                if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }
 
-       get_info->card = card->index;
-
        return 0;
 }
 
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+                              struct client_resource *resource, gfp_t gfp_mask)
 {
        unsigned long flags;
+       int ret;
+
+ retry:
+       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+               return -ENOMEM;
 
        spin_lock_irqsave(&client->lock, flags);
-       list_add_tail(&resource->link, &client->resource_list);
-       resource->handle = client->resource_handle++;
+       if (client->in_shutdown)
+               ret = -ECANCELED;
+       else
+               ret = idr_get_new(&client->resource_idr, resource,
+                                 &resource->handle);
+       if (ret >= 0) {
+               client_get(client);
+               if (resource->release == release_iso_resource)
+                       schedule_iso_resource(container_of(resource,
+                                       struct iso_resource, resource));
+       }
        spin_unlock_irqrestore(&client->lock, flags);
+
+       if (ret == -EAGAIN)
+               goto retry;
+
+       return ret < 0 ? ret : 0;
 }
 
-static int
-release_client_resource(struct client *client, u32 handle,
-                       struct client_resource **resource)
+static int release_client_resource(struct client *client, u32 handle,
+                                  client_resource_release_fn_t release,
+                                  struct client_resource **resource)
 {
        struct client_resource *r;
-       unsigned long flags;
 
-       spin_lock_irqsave(&client->lock, flags);
-       list_for_each_entry(r, &client->resource_list, link) {
-               if (r->handle == handle) {
-                       list_del(&r->link);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&client->lock, flags);
+       spin_lock_irq(&client->lock);
+       if (client->in_shutdown)
+               r = NULL;
+       else
+               r = idr_find(&client->resource_idr, handle);
+       if (r && r->release == release)
+               idr_remove(&client->resource_idr, handle);
+       spin_unlock_irq(&client->lock);
 
-       if (&r->link == &client->resource_list)
+       if (!(r && r->release == release))
                return -EINVAL;
 
        if (resource)
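Note: add_client_resource() adopts the two-step idr allocation idiom of this kernel era: idr_pre_get() preallocates outside the spinlock, idr_get_new() assigns a handle under the lock, and -EAGAIN sends the caller back around the retry loop. A rough user-space model of that preload-then-commit shape; the one-slot cache below is illustrative, not the kernel's idr implementation.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *preloaded;            /* one-slot preallocation cache */

/* Like idr_pre_get(): may allocate, so it runs outside the lock. */
static int pre_get(void)
{
        if (preloaded == NULL)
                preloaded = malloc(64);
        return preloaded != NULL;
}

/* Like idr_get_new(): lock-safe, consumes the preallocation, and
 * reports -EAGAIN when the cache is empty so the caller retries. */
static int get_new(int *handle)
{
        static int next_handle;

        if (preloaded == NULL)
                return -EAGAIN;
        free(preloaded);
        preloaded = NULL;
        *handle = next_handle++;
        return 0;
}

int main(void)
{
        int handle, ret;

retry:
        if (pre_get() == 0) {
                fprintf(stderr, "out of memory\n");
                return 1;
        }
        /* The kernel takes client->lock here around get_new(). */
        ret = get_new(&handle);
        if (ret == -EAGAIN)
                goto retry;

        printf("allocated handle %d\n", handle);
        return 0;
}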
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle, | |||
350 | else | 448 | else |
351 | r->release(client, r); | 449 | r->release(client, r); |
352 | 450 | ||
451 | client_put(client); | ||
452 | |||
353 | return 0; | 453 | return 0; |
354 | } | 454 | } |
355 | 455 | ||
356 | static void | 456 | static void release_transaction(struct client *client, |
357 | release_transaction(struct client *client, struct client_resource *resource) | 457 | struct client_resource *resource) |
358 | { | 458 | { |
359 | struct response *response = | 459 | struct outbound_transaction_resource *r = container_of(resource, |
360 | container_of(resource, struct response, resource); | 460 | struct outbound_transaction_resource, resource); |
361 | 461 | ||
362 | fw_cancel_transaction(client->device->card, &response->transaction); | 462 | fw_cancel_transaction(client->device->card, &r->transaction); |
363 | } | 463 | } |
364 | 464 | ||
365 | static void | 465 | static void complete_transaction(struct fw_card *card, int rcode, |
366 | complete_transaction(struct fw_card *card, int rcode, | 466 | void *payload, size_t length, void *data) |
367 | void *payload, size_t length, void *data) | ||
368 | { | 467 | { |
369 | struct response *response = data; | 468 | struct outbound_transaction_event *e = data; |
370 | struct client *client = response->client; | 469 | struct fw_cdev_event_response *rsp = &e->response; |
470 | struct client *client = e->client; | ||
371 | unsigned long flags; | 471 | unsigned long flags; |
372 | struct fw_cdev_event_response *r = &response->response; | ||
373 | 472 | ||
374 | if (length < r->length) | 473 | if (length < rsp->length) |
375 | r->length = length; | 474 | rsp->length = length; |
376 | if (rcode == RCODE_COMPLETE) | 475 | if (rcode == RCODE_COMPLETE) |
377 | memcpy(r->data, payload, r->length); | 476 | memcpy(rsp->data, payload, rsp->length); |
378 | 477 | ||
379 | spin_lock_irqsave(&client->lock, flags); | 478 | spin_lock_irqsave(&client->lock, flags); |
380 | list_del(&response->resource.link); | 479 | /* |
480 | * 1. If called while in shutdown, the idr tree must be left untouched. | ||
481 | * The idr handle will be removed and the client reference will be | ||
482 | * dropped later. | ||
483 | * 2. If the call chain was release_client_resource -> | ||
484 | * release_transaction -> complete_transaction (instead of a normal | ||
485 | * conclusion of the transaction), i.e. if this resource was already | ||
486 | * unregistered from the idr, the client reference will be dropped | ||
487 | * by release_client_resource and we must not drop it here. | ||
488 | */ | ||
489 | if (!client->in_shutdown && | ||
490 | idr_find(&client->resource_idr, e->r.resource.handle)) { | ||
491 | idr_remove(&client->resource_idr, e->r.resource.handle); | ||
492 | /* Drop the idr's reference */ | ||
493 | client_put(client); | ||
494 | } | ||
381 | spin_unlock_irqrestore(&client->lock, flags); | 495 | spin_unlock_irqrestore(&client->lock, flags); |
382 | 496 | ||
383 | r->type = FW_CDEV_EVENT_RESPONSE; | 497 | rsp->type = FW_CDEV_EVENT_RESPONSE; |
384 | r->rcode = rcode; | 498 | rsp->rcode = rcode; |
385 | 499 | ||
386 | /* | 500 | /* |
387 | * In the case that sizeof(*r) doesn't align with the position of the | 501 | * In the case that sizeof(*rsp) doesn't align with the position of the |
388 | * data, and the read is short, preserve an extra copy of the data | 502 | * data, and the read is short, preserve an extra copy of the data |
389 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless | 503 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless |
390 | * for short reads and some apps depended on it, this is both safe | 504 | * for short reads and some apps depended on it, this is both safe |
391 | * and prudent for compatibility. | 505 | * and prudent for compatibility. |
392 | */ | 506 | */ |
393 | if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) | 507 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) |
394 | queue_event(client, &response->event, r, sizeof(*r), | 508 | queue_event(client, &e->event, rsp, sizeof(*rsp), |
395 | r->data, r->length); | 509 | rsp->data, rsp->length); |
396 | else | 510 | else |
397 | queue_event(client, &response->event, r, sizeof(*r) + r->length, | 511 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, |
398 | NULL, 0); | 512 | NULL, 0); |
513 | |||
514 | /* Drop the transaction callback's reference */ | ||
515 | client_put(client); | ||
399 | } | 516 | } |
400 | 517 | ||
401 | static int ioctl_send_request(struct client *client, void *buffer) | 518 | static int init_request(struct client *client, |
519 | struct fw_cdev_send_request *request, | ||
520 | int destination_id, int speed) | ||
402 | { | 521 | { |
403 | struct fw_device *device = client->device; | 522 | struct outbound_transaction_event *e; |
404 | struct fw_cdev_send_request *request = buffer; | 523 | int ret; |
405 | struct response *response; | ||
406 | 524 | ||
407 | /* What is the biggest size we'll accept, really? */ | 525 | if (request->tcode != TCODE_STREAM_DATA && |
408 | if (request->length > 4096) | 526 | (request->length > 4096 || request->length > 512 << speed)) |
409 | return -EINVAL; | 527 | return -EIO; |
410 | 528 | ||
411 | response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); | 529 | e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); |
412 | if (response == NULL) | 530 | if (e == NULL) |
413 | return -ENOMEM; | 531 | return -ENOMEM; |
414 | 532 | ||
415 | response->client = client; | 533 | e->client = client; |
416 | response->response.length = request->length; | 534 | e->response.length = request->length; |
417 | response->response.closure = request->closure; | 535 | e->response.closure = request->closure; |
418 | 536 | ||
419 | if (request->data && | 537 | if (request->data && |
420 | copy_from_user(response->response.data, | 538 | copy_from_user(e->response.data, |
421 | u64_to_uptr(request->data), request->length)) { | 539 | u64_to_uptr(request->data), request->length)) { |
422 | kfree(response); | 540 | ret = -EFAULT; |
423 | return -EFAULT; | 541 | goto failed; |
424 | } | 542 | } |
425 | 543 | ||
426 | response->resource.release = release_transaction; | 544 | e->r.resource.release = release_transaction; |
427 | add_client_resource(client, &response->resource); | 545 | ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); |
546 | if (ret < 0) | ||
547 | goto failed; | ||
428 | 548 | ||
429 | fw_send_request(device->card, &response->transaction, | 549 | /* Get a reference for the transaction callback */ |
430 | request->tcode & 0x1f, | 550 | client_get(client); |
431 | device->node->node_id, | ||
432 | request->generation, | ||
433 | device->max_speed, | ||
434 | request->offset, | ||
435 | response->response.data, request->length, | ||
436 | complete_transaction, response); | ||
437 | 551 | ||
438 | if (request->data) | 552 | fw_send_request(client->device->card, &e->r.transaction, |
439 | return sizeof(request) + request->length; | 553 | request->tcode, destination_id, request->generation, |
440 | else | 554 | speed, request->offset, e->response.data, |
441 | return sizeof(request); | 555 | request->length, complete_transaction, e); |
556 | return 0; | ||
557 | |||
558 | failed: | ||
559 | kfree(e); | ||
560 | |||
561 | return ret; | ||
442 | } | 562 | } |
443 | 563 | ||
444 | struct address_handler { | 564 | static int ioctl_send_request(struct client *client, void *buffer) |
445 | struct fw_address_handler handler; | 565 | { |
446 | __u64 closure; | 566 | struct fw_cdev_send_request *request = buffer; |
447 | struct client *client; | ||
448 | struct client_resource resource; | ||
449 | }; | ||
450 | 567 | ||
451 | struct request { | 568 | switch (request->tcode) { |
452 | struct fw_request *request; | 569 | case TCODE_WRITE_QUADLET_REQUEST: |
453 | void *data; | 570 | case TCODE_WRITE_BLOCK_REQUEST: |
454 | size_t length; | 571 | case TCODE_READ_QUADLET_REQUEST: |
455 | struct client_resource resource; | 572 | case TCODE_READ_BLOCK_REQUEST: |
456 | }; | 573 | case TCODE_LOCK_MASK_SWAP: |
574 | case TCODE_LOCK_COMPARE_SWAP: | ||
575 | case TCODE_LOCK_FETCH_ADD: | ||
576 | case TCODE_LOCK_LITTLE_ADD: | ||
577 | case TCODE_LOCK_BOUNDED_ADD: | ||
578 | case TCODE_LOCK_WRAP_ADD: | ||
579 | case TCODE_LOCK_VENDOR_DEPENDENT: | ||
580 | break; | ||
581 | default: | ||
582 | return -EINVAL; | ||
583 | } | ||
457 | 584 | ||
458 | struct request_event { | 585 | return init_request(client, request, client->device->node_id, |
459 | struct event event; | 586 | client->device->max_speed); |
460 | struct fw_cdev_event_request request; | 587 | } |
461 | }; | ||
462 | 588 | ||
463 | static void | 589 | static void release_request(struct client *client, |
464 | release_request(struct client *client, struct client_resource *resource) | 590 | struct client_resource *resource) |
465 | { | 591 | { |
466 | struct request *request = | 592 | struct inbound_transaction_resource *r = container_of(resource, |
467 | container_of(resource, struct request, resource); | 593 | struct inbound_transaction_resource, resource); |
468 | 594 | ||
469 | fw_send_response(client->device->card, request->request, | 595 | fw_send_response(client->device->card, r->request, |
470 | RCODE_CONFLICT_ERROR); | 596 | RCODE_CONFLICT_ERROR); |
471 | kfree(request); | 597 | kfree(r); |
472 | } | 598 | } |
473 | 599 | ||
474 | static void | 600 | static void handle_request(struct fw_card *card, struct fw_request *request, |
475 | handle_request(struct fw_card *card, struct fw_request *r, | 601 | int tcode, int destination, int source, |
476 | int tcode, int destination, int source, | 602 | int generation, int speed, |
477 | int generation, int speed, | 603 | unsigned long long offset, |
478 | unsigned long long offset, | 604 | void *payload, size_t length, void *callback_data) |
479 | void *payload, size_t length, void *callback_data) | ||
480 | { | 605 | { |
481 | struct address_handler *handler = callback_data; | 606 | struct address_handler_resource *handler = callback_data; |
482 | struct request *request; | 607 | struct inbound_transaction_resource *r; |
483 | struct request_event *e; | 608 | struct inbound_transaction_event *e; |
484 | struct client *client = handler->client; | 609 | int ret; |
485 | 610 | ||
486 | request = kmalloc(sizeof(*request), GFP_ATOMIC); | 611 | r = kmalloc(sizeof(*r), GFP_ATOMIC); |
487 | e = kmalloc(sizeof(*e), GFP_ATOMIC); | 612 | e = kmalloc(sizeof(*e), GFP_ATOMIC); |
488 | if (request == NULL || e == NULL) { | 613 | if (r == NULL || e == NULL) |
489 | kfree(request); | 614 | goto failed; |
490 | kfree(e); | ||
491 | fw_send_response(card, r, RCODE_CONFLICT_ERROR); | ||
492 | return; | ||
493 | } | ||
494 | 615 | ||
495 | request->request = r; | 616 | r->request = request; |
496 | request->data = payload; | 617 | r->data = payload; |
497 | request->length = length; | 618 | r->length = length; |
498 | 619 | ||
499 | request->resource.release = release_request; | 620 | r->resource.release = release_request; |
500 | add_client_resource(client, &request->resource); | 621 | ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); |
622 | if (ret < 0) | ||
623 | goto failed; | ||
501 | 624 | ||
502 | e->request.type = FW_CDEV_EVENT_REQUEST; | 625 | e->request.type = FW_CDEV_EVENT_REQUEST; |
503 | e->request.tcode = tcode; | 626 | e->request.tcode = tcode; |
504 | e->request.offset = offset; | 627 | e->request.offset = offset; |
505 | e->request.length = length; | 628 | e->request.length = length; |
506 | e->request.handle = request->resource.handle; | 629 | e->request.handle = r->resource.handle; |
507 | e->request.closure = handler->closure; | 630 | e->request.closure = handler->closure; |
508 | 631 | ||
509 | queue_event(client, &e->event, | 632 | queue_event(handler->client, &e->event, |
510 | &e->request, sizeof(e->request), payload, length); | 633 | &e->request, sizeof(e->request), payload, length); |
634 | return; | ||
635 | |||
636 | failed: | ||
637 | kfree(r); | ||
638 | kfree(e); | ||
639 | fw_send_response(card, request, RCODE_CONFLICT_ERROR); | ||
511 | } | 640 | } |
512 | 641 | ||
513 | static void | 642 | static void release_address_handler(struct client *client, |
514 | release_address_handler(struct client *client, | 643 | struct client_resource *resource) |
515 | struct client_resource *resource) | ||
516 | { | 644 | { |
517 | struct address_handler *handler = | 645 | struct address_handler_resource *r = |
518 | container_of(resource, struct address_handler, resource); | 646 | container_of(resource, struct address_handler_resource, resource); |
519 | 647 | ||
520 | fw_core_remove_address_handler(&handler->handler); | 648 | fw_core_remove_address_handler(&r->handler); |
521 | kfree(handler); | 649 | kfree(r); |
522 | } | 650 | } |
523 | 651 | ||
524 | static int ioctl_allocate(struct client *client, void *buffer) | 652 | static int ioctl_allocate(struct client *client, void *buffer) |
525 | { | 653 | { |
526 | struct fw_cdev_allocate *request = buffer; | 654 | struct fw_cdev_allocate *request = buffer; |
527 | struct address_handler *handler; | 655 | struct address_handler_resource *r; |
528 | struct fw_address_region region; | 656 | struct fw_address_region region; |
657 | int ret; | ||
529 | 658 | ||
530 | handler = kmalloc(sizeof(*handler), GFP_KERNEL); | 659 | r = kmalloc(sizeof(*r), GFP_KERNEL); |
531 | if (handler == NULL) | 660 | if (r == NULL) |
532 | return -ENOMEM; | 661 | return -ENOMEM; |
533 | 662 | ||
534 | region.start = request->offset; | 663 | region.start = request->offset; |
535 | region.end = request->offset + request->length; | 664 | region.end = request->offset + request->length; |
536 | handler->handler.length = request->length; | 665 | r->handler.length = request->length; |
537 | handler->handler.address_callback = handle_request; | 666 | r->handler.address_callback = handle_request; |
538 | handler->handler.callback_data = handler; | 667 | r->handler.callback_data = r; |
539 | handler->closure = request->closure; | 668 | r->closure = request->closure; |
540 | handler->client = client; | 669 | r->client = client; |
541 | 670 | ||
542 | if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { | 671 | ret = fw_core_add_address_handler(&r->handler, ®ion); |
543 | kfree(handler); | 672 | if (ret < 0) { |
544 | return -EBUSY; | 673 | kfree(r); |
674 | return ret; | ||
545 | } | 675 | } |
546 | 676 | ||
547 | handler->resource.release = release_address_handler; | 677 | r->resource.release = release_address_handler; |
548 | add_client_resource(client, &handler->resource); | 678 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
549 | request->handle = handler->resource.handle; | 679 | if (ret < 0) { |
680 | release_address_handler(client, &r->resource); | ||
681 | return ret; | ||
682 | } | ||
683 | request->handle = r->resource.handle; | ||
550 | 684 | ||
551 | return 0; | 685 | return 0; |
552 | } | 686 | } |
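[Editor's note] ioctl_allocate() now checks the return value of add_client_resource(), which also grew a gfp_t argument. The helper itself is not shown in this hunk; the following is only a plausible sketch, inferred from its call sites here (idr-backed handles in client->resource_idr, the in_shutdown flag, and the per-resource client reference dropped later in shutdown_resource()), using the idr API of this kernel generation:

/*
 * Sketch only: plausible shape of add_client_resource(), inferred
 * from how it is called in this diff.  Hands out resource->handle
 * and pins the client so shutdown_resource() can drop one reference
 * per resource.
 */
static int add_client_resource(struct client *client,
			       struct client_resource *resource,
			       gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0)
		client_get(client);
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;	/* preallocated idr layer was consumed */

	return ret < 0 ? ret : 0;
}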
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer) | |||
555 | { | 689 | { |
556 | struct fw_cdev_deallocate *request = buffer; | 690 | struct fw_cdev_deallocate *request = buffer; |
557 | 691 | ||
558 | return release_client_resource(client, request->handle, NULL); | 692 | return release_client_resource(client, request->handle, |
693 | release_address_handler, NULL); | ||
559 | } | 694 | } |
560 | 695 | ||
561 | static int ioctl_send_response(struct client *client, void *buffer) | 696 | static int ioctl_send_response(struct client *client, void *buffer) |
562 | { | 697 | { |
563 | struct fw_cdev_send_response *request = buffer; | 698 | struct fw_cdev_send_response *request = buffer; |
564 | struct client_resource *resource; | 699 | struct client_resource *resource; |
565 | struct request *r; | 700 | struct inbound_transaction_resource *r; |
566 | 701 | ||
567 | if (release_client_resource(client, request->handle, &resource) < 0) | 702 | if (release_client_resource(client, request->handle, |
703 | release_request, &resource) < 0) | ||
568 | return -EINVAL; | 704 | return -EINVAL; |
569 | r = container_of(resource, struct request, resource); | 705 | |
706 | r = container_of(resource, struct inbound_transaction_resource, | ||
707 | resource); | ||
570 | if (request->length < r->length) | 708 | if (request->length < r->length) |
571 | r->length = request->length; | 709 | r->length = request->length; |
572 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) | 710 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) |
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer) | |||
588 | return fw_core_initiate_bus_reset(client->device->card, short_reset); | 726 | return fw_core_initiate_bus_reset(client->device->card, short_reset); |
589 | } | 727 | } |
590 | 728 | ||
591 | struct descriptor { | ||
592 | struct fw_descriptor d; | ||
593 | struct client_resource resource; | ||
594 | u32 data[0]; | ||
595 | }; | ||
596 | |||
597 | static void release_descriptor(struct client *client, | 729 | static void release_descriptor(struct client *client, |
598 | struct client_resource *resource) | 730 | struct client_resource *resource) |
599 | { | 731 | { |
600 | struct descriptor *descriptor = | 732 | struct descriptor_resource *r = |
601 | container_of(resource, struct descriptor, resource); | 733 | container_of(resource, struct descriptor_resource, resource); |
602 | 734 | ||
603 | fw_core_remove_descriptor(&descriptor->d); | 735 | fw_core_remove_descriptor(&r->descriptor); |
604 | kfree(descriptor); | 736 | kfree(r); |
605 | } | 737 | } |
606 | 738 | ||
607 | static int ioctl_add_descriptor(struct client *client, void *buffer) | 739 | static int ioctl_add_descriptor(struct client *client, void *buffer) |
608 | { | 740 | { |
609 | struct fw_cdev_add_descriptor *request = buffer; | 741 | struct fw_cdev_add_descriptor *request = buffer; |
610 | struct descriptor *descriptor; | 742 | struct fw_card *card = client->device->card; |
611 | int retval; | 743 | struct descriptor_resource *r; |
744 | int ret; | ||
745 | |||
746 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | ||
747 | spin_lock_irq(&card->lock); | ||
748 | ret = client->device->node_id != card->local_node->node_id; | ||
749 | spin_unlock_irq(&card->lock); | ||
750 | if (ret) | ||
751 | return -ENOSYS; | ||
612 | 752 | ||
613 | if (request->length > 256) | 753 | if (request->length > 256) |
614 | return -EINVAL; | 754 | return -EINVAL; |
615 | 755 | ||
616 | descriptor = | 756 | r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); |
617 | kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); | 757 | if (r == NULL) |
618 | if (descriptor == NULL) | ||
619 | return -ENOMEM; | 758 | return -ENOMEM; |
620 | 759 | ||
621 | if (copy_from_user(descriptor->data, | 760 | if (copy_from_user(r->data, |
622 | u64_to_uptr(request->data), request->length * 4)) { | 761 | u64_to_uptr(request->data), request->length * 4)) { |
623 | kfree(descriptor); | 762 | ret = -EFAULT; |
624 | return -EFAULT; | 763 | goto failed; |
625 | } | 764 | } |
626 | 765 | ||
627 | descriptor->d.length = request->length; | 766 | r->descriptor.length = request->length; |
628 | descriptor->d.immediate = request->immediate; | 767 | r->descriptor.immediate = request->immediate; |
629 | descriptor->d.key = request->key; | 768 | r->descriptor.key = request->key; |
630 | descriptor->d.data = descriptor->data; | 769 | r->descriptor.data = r->data; |
631 | 770 | ||
632 | retval = fw_core_add_descriptor(&descriptor->d); | 771 | ret = fw_core_add_descriptor(&r->descriptor); |
633 | if (retval < 0) { | 772 | if (ret < 0) |
634 | kfree(descriptor); | 773 | goto failed; |
635 | return retval; | ||
636 | } | ||
637 | 774 | ||
638 | descriptor->resource.release = release_descriptor; | 775 | r->resource.release = release_descriptor; |
639 | add_client_resource(client, &descriptor->resource); | 776 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
640 | request->handle = descriptor->resource.handle; | 777 | if (ret < 0) { |
778 | fw_core_remove_descriptor(&r->descriptor); | ||
779 | goto failed; | ||
780 | } | ||
781 | request->handle = r->resource.handle; | ||
641 | 782 | ||
642 | return 0; | 783 | return 0; |
784 | failed: | ||
785 | kfree(r); | ||
786 | |||
787 | return ret; | ||
643 | } | 788 | } |
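[Editor's note] ioctl_add_descriptor() is now restricted to local nodes and unregisters the descriptor again if handle registration fails. A hypothetical userspace sketch of calling it; the struct and FW_CDEV_IOC_* constant come from <linux/firewire-cdev.h>, and the key value is an arbitrary placeholder:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Hypothetical helper: returns a handle for later removal, or -1. */
static int add_rom_descriptor(int fd, const uint32_t *quadlets, uint32_t len)
{
	struct fw_cdev_add_descriptor d = {
		.immediate = 0,
		.key       = 0,	/* placeholder directory key */
		.data      = (uint64_t)(uintptr_t)quadlets,
		.length    = len,	/* in quadlets, at most 256 */
	};

	if (ioctl(fd, FW_CDEV_IOC_ADD_DESCRIPTOR, &d) < 0)
		return -1;	/* errno is ENOSYS on a remote node's file */

	return d.handle;
}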
644 | 789 | ||
645 | static int ioctl_remove_descriptor(struct client *client, void *buffer) | 790 | static int ioctl_remove_descriptor(struct client *client, void *buffer) |
646 | { | 791 | { |
647 | struct fw_cdev_remove_descriptor *request = buffer; | 792 | struct fw_cdev_remove_descriptor *request = buffer; |
648 | 793 | ||
649 | return release_client_resource(client, request->handle, NULL); | 794 | return release_client_resource(client, request->handle, |
795 | release_descriptor, NULL); | ||
650 | } | 796 | } |
651 | 797 | ||
652 | static void | 798 | static void iso_callback(struct fw_iso_context *context, u32 cycle, |
653 | iso_callback(struct fw_iso_context *context, u32 cycle, | 799 | size_t header_length, void *header, void *data) |
654 | size_t header_length, void *header, void *data) | ||
655 | { | 800 | { |
656 | struct client *client = data; | 801 | struct client *client = data; |
657 | struct iso_interrupt *irq; | 802 | struct iso_interrupt_event *e; |
658 | 803 | ||
659 | irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); | 804 | e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); |
660 | if (irq == NULL) | 805 | if (e == NULL) |
661 | return; | 806 | return; |
662 | 807 | ||
663 | irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | 808 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
664 | irq->interrupt.closure = client->iso_closure; | 809 | e->interrupt.closure = client->iso_closure; |
665 | irq->interrupt.cycle = cycle; | 810 | e->interrupt.cycle = cycle; |
666 | irq->interrupt.header_length = header_length; | 811 | e->interrupt.header_length = header_length; |
667 | memcpy(irq->interrupt.header, header, header_length); | 812 | memcpy(e->interrupt.header, header, header_length); |
668 | queue_event(client, &irq->event, &irq->interrupt, | 813 | queue_event(client, &e->event, &e->interrupt, |
669 | sizeof(irq->interrupt) + header_length, NULL, 0); | 814 | sizeof(e->interrupt) + header_length, NULL, 0); |
670 | } | 815 | } |
671 | 816 | ||
672 | static int ioctl_create_iso_context(struct client *client, void *buffer) | 817 | static int ioctl_create_iso_context(struct client *client, void *buffer) |
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer) | |||
871 | return 0; | 1016 | return 0; |
872 | } | 1017 | } |
873 | 1018 | ||
1019 | static void iso_resource_work(struct work_struct *work) | ||
1020 | { | ||
1021 | struct iso_resource_event *e; | ||
1022 | struct iso_resource *r = | ||
1023 | container_of(work, struct iso_resource, work.work); | ||
1024 | struct client *client = r->client; | ||
1025 | int generation, channel, bandwidth, todo; | ||
1026 | bool skip, free, success; | ||
1027 | |||
1028 | spin_lock_irq(&client->lock); | ||
1029 | generation = client->device->generation; | ||
1030 | todo = r->todo; | ||
1031 | /* Allow 1000ms grace period for other reallocations. */ | ||
1032 | if (todo == ISO_RES_ALLOC && | ||
1033 | time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) { | ||
1034 | if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3))) | ||
1035 | client_get(client); | ||
1036 | skip = true; | ||
1037 | } else { | ||
1038 | /* We could be called twice within the same generation. */ | ||
1039 | skip = todo == ISO_RES_REALLOC && | ||
1040 | r->generation == generation; | ||
1041 | } | ||
1042 | free = todo == ISO_RES_DEALLOC || | ||
1043 | todo == ISO_RES_ALLOC_ONCE || | ||
1044 | todo == ISO_RES_DEALLOC_ONCE; | ||
1045 | r->generation = generation; | ||
1046 | spin_unlock_irq(&client->lock); | ||
1047 | |||
1048 | if (skip) | ||
1049 | goto out; | ||
1050 | |||
1051 | bandwidth = r->bandwidth; | ||
1052 | |||
1053 | fw_iso_resource_manage(client->device->card, generation, | ||
1054 | r->channels, &channel, &bandwidth, | ||
1055 | todo == ISO_RES_ALLOC || | ||
1056 | todo == ISO_RES_REALLOC || | ||
1057 | todo == ISO_RES_ALLOC_ONCE); | ||
1058 | /* | ||
1059 | * Is this generation outdated already? As long as this resource sticks | ||
1060 | * in the idr, it will be scheduled again for a newer generation or at | ||
1061 | * shutdown. | ||
1062 | */ | ||
1063 | if (channel == -EAGAIN && | ||
1064 | (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) | ||
1065 | goto out; | ||
1066 | |||
1067 | success = channel >= 0 || bandwidth > 0; | ||
1068 | |||
1069 | spin_lock_irq(&client->lock); | ||
1070 | /* | ||
1071 | * Transition from allocation to reallocation, except if the client | ||
1072 | * requested deallocation in the meantime. | ||
1073 | */ | ||
1074 | if (r->todo == ISO_RES_ALLOC) | ||
1075 | r->todo = ISO_RES_REALLOC; | ||
1076 | /* | ||
1077 | * Allocation or reallocation failure? Pull this resource out of the | ||
1078 | * idr and prepare for deletion, unless the client is shutting down. | ||
1079 | */ | ||
1080 | if (r->todo == ISO_RES_REALLOC && !success && | ||
1081 | !client->in_shutdown && | ||
1082 | idr_find(&client->resource_idr, r->resource.handle)) { | ||
1083 | idr_remove(&client->resource_idr, r->resource.handle); | ||
1084 | client_put(client); | ||
1085 | free = true; | ||
1086 | } | ||
1087 | spin_unlock_irq(&client->lock); | ||
1088 | |||
1089 | if (todo == ISO_RES_ALLOC && channel >= 0) | ||
1090 | r->channels = 1ULL << channel; | ||
1091 | |||
1092 | if (todo == ISO_RES_REALLOC && success) | ||
1093 | goto out; | ||
1094 | |||
1095 | if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { | ||
1096 | e = r->e_alloc; | ||
1097 | r->e_alloc = NULL; | ||
1098 | } else { | ||
1099 | e = r->e_dealloc; | ||
1100 | r->e_dealloc = NULL; | ||
1101 | } | ||
1102 | e->resource.handle = r->resource.handle; | ||
1103 | e->resource.channel = channel; | ||
1104 | e->resource.bandwidth = bandwidth; | ||
1105 | |||
1106 | queue_event(client, &e->event, | ||
1107 | &e->resource, sizeof(e->resource), NULL, 0); | ||
1108 | |||
1109 | if (free) { | ||
1110 | cancel_delayed_work(&r->work); | ||
1111 | kfree(r->e_alloc); | ||
1112 | kfree(r->e_dealloc); | ||
1113 | kfree(r); | ||
1114 | } | ||
1115 | out: | ||
1116 | client_put(client); | ||
1117 | } | ||
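[Editor's note] iso_resource_work() is driven by a small state machine in r->todo. The enum itself is defined earlier in fw-cdev.c and is not part of this hunk; its assumed shape, built only from the names used above:

/* Assumed shape of the state enum, from the names used above: */
enum {
	ISO_RES_ALLOC,		/* allocate, then keep reallocating */
	ISO_RES_REALLOC,	/* re-acquire after each bus reset */
	ISO_RES_DEALLOC,	/* client asked to give it back */
	ISO_RES_ALLOC_ONCE,	/* one-shot allocation, no reallocation */
	ISO_RES_DEALLOC_ONCE,	/* one-shot deallocation */
};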
1118 | |||
1119 | static void schedule_iso_resource(struct iso_resource *r) | ||
1120 | { | ||
1121 | client_get(r->client); | ||
1122 | if (!schedule_delayed_work(&r->work, 0)) | ||
1123 | client_put(r->client); | ||
1124 | } | ||
1125 | |||
1126 | static void release_iso_resource(struct client *client, | ||
1127 | struct client_resource *resource) | ||
1128 | { | ||
1129 | struct iso_resource *r = | ||
1130 | container_of(resource, struct iso_resource, resource); | ||
1131 | |||
1132 | spin_lock_irq(&client->lock); | ||
1133 | r->todo = ISO_RES_DEALLOC; | ||
1134 | schedule_iso_resource(r); | ||
1135 | spin_unlock_irq(&client->lock); | ||
1136 | } | ||
1137 | |||
1138 | static int init_iso_resource(struct client *client, | ||
1139 | struct fw_cdev_allocate_iso_resource *request, int todo) | ||
1140 | { | ||
1141 | struct iso_resource_event *e1, *e2; | ||
1142 | struct iso_resource *r; | ||
1143 | int ret; | ||
1144 | |||
1145 | if ((request->channels == 0 && request->bandwidth == 0) || | ||
1146 | request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || | ||
1147 | request->bandwidth < 0) | ||
1148 | return -EINVAL; | ||
1149 | |||
1150 | r = kmalloc(sizeof(*r), GFP_KERNEL); | ||
1151 | e1 = kmalloc(sizeof(*e1), GFP_KERNEL); | ||
1152 | e2 = kmalloc(sizeof(*e2), GFP_KERNEL); | ||
1153 | if (r == NULL || e1 == NULL || e2 == NULL) { | ||
1154 | ret = -ENOMEM; | ||
1155 | goto fail; | ||
1156 | } | ||
1157 | |||
1158 | INIT_DELAYED_WORK(&r->work, iso_resource_work); | ||
1159 | r->client = client; | ||
1160 | r->todo = todo; | ||
1161 | r->generation = -1; | ||
1162 | r->channels = request->channels; | ||
1163 | r->bandwidth = request->bandwidth; | ||
1164 | r->e_alloc = e1; | ||
1165 | r->e_dealloc = e2; | ||
1166 | |||
1167 | e1->resource.closure = request->closure; | ||
1168 | e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; | ||
1169 | e2->resource.closure = request->closure; | ||
1170 | e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; | ||
1171 | |||
1172 | if (todo == ISO_RES_ALLOC) { | ||
1173 | r->resource.release = release_iso_resource; | ||
1174 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | ||
1175 | if (ret < 0) | ||
1176 | goto fail; | ||
1177 | } else { | ||
1178 | r->resource.release = NULL; | ||
1179 | r->resource.handle = -1; | ||
1180 | schedule_iso_resource(r); | ||
1181 | } | ||
1182 | request->handle = r->resource.handle; | ||
1183 | |||
1184 | return 0; | ||
1185 | fail: | ||
1186 | kfree(r); | ||
1187 | kfree(e1); | ||
1188 | kfree(e2); | ||
1189 | |||
1190 | return ret; | ||
1191 | } | ||
1192 | |||
1193 | static int ioctl_allocate_iso_resource(struct client *client, void *buffer) | ||
1194 | { | ||
1195 | struct fw_cdev_allocate_iso_resource *request = buffer; | ||
1196 | |||
1197 | return init_iso_resource(client, request, ISO_RES_ALLOC); | ||
1198 | } | ||
1199 | |||
1200 | static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) | ||
1201 | { | ||
1202 | struct fw_cdev_deallocate *request = buffer; | ||
1203 | |||
1204 | return release_client_resource(client, request->handle, | ||
1205 | release_iso_resource, NULL); | ||
1206 | } | ||
1207 | |||
1208 | static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) | ||
1209 | { | ||
1210 | struct fw_cdev_allocate_iso_resource *request = buffer; | ||
1211 | |||
1212 | return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); | ||
1213 | } | ||
1214 | |||
1215 | static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) | ||
1216 | { | ||
1217 | struct fw_cdev_allocate_iso_resource *request = buffer; | ||
1218 | |||
1219 | return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); | ||
1220 | } | ||
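[Editor's note] Together with init_iso_resource(), the four new ioctls give userspace both managed resources (reallocated after every bus reset until deallocated) and one-shot operations. A hypothetical userspace sketch for the managed variant; the constant and struct come from <linux/firewire-cdev.h> as extended by this series, and the channel/bandwidth values are arbitrary:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Hypothetical helper: returns the resource handle, or -1 on error. */
static int allocate_iso_resource(int fd, uint64_t closure)
{
	struct fw_cdev_allocate_iso_resource res = {
		.closure   = closure,			/* echoed back in the event */
		.channels  = 1ULL << 5 | 1ULL << 6,	/* acceptable channels 5, 6 */
		.bandwidth = 2400,			/* bandwidth allocation units */
	};

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res) < 0)
		return -1;

	/* The outcome arrives asynchronously on the same fd as an
	 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event. */
	return res.handle;
}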
1221 | |||
1222 | /* | ||
1223 | * Returns a speed code: Maximum speed to or from this device, | ||
1224 | * limited by the device's link speed, the local node's link speed, | ||
1225 | * and all PHY port speeds between the two links. | ||
1226 | */ | ||
1227 | static int ioctl_get_speed(struct client *client, void *buffer) | ||
1228 | { | ||
1229 | return client->device->max_speed; | ||
1230 | } | ||
1231 | |||
1232 | static int ioctl_send_broadcast_request(struct client *client, void *buffer) | ||
1233 | { | ||
1234 | struct fw_cdev_send_request *request = buffer; | ||
1235 | |||
1236 | switch (request->tcode) { | ||
1237 | case TCODE_WRITE_QUADLET_REQUEST: | ||
1238 | case TCODE_WRITE_BLOCK_REQUEST: | ||
1239 | break; | ||
1240 | default: | ||
1241 | return -EINVAL; | ||
1242 | } | ||
1243 | |||
1244 | /* Security policy: Only allow accesses to Units Space. */ | ||
1245 | if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) | ||
1246 | return -EACCES; | ||
1247 | |||
1248 | return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); | ||
1249 | } | ||
1250 | |||
1251 | static int ioctl_send_stream_packet(struct client *client, void *buffer) | ||
1252 | { | ||
1253 | struct fw_cdev_send_stream_packet *p = buffer; | ||
1254 | struct fw_cdev_send_request request; | ||
1255 | int dest; | ||
1256 | |||
1257 | if (p->speed > client->device->card->link_speed || | ||
1258 | p->length > 1024 << p->speed) | ||
1259 | return -EIO; | ||
1260 | |||
1261 | if (p->tag > 3 || p->channel > 63 || p->sy > 15) | ||
1262 | return -EINVAL; | ||
1263 | |||
1264 | dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); | ||
1265 | request.tcode = TCODE_STREAM_DATA; | ||
1266 | request.length = p->length; | ||
1267 | request.closure = p->closure; | ||
1268 | request.data = p->data; | ||
1269 | request.generation = p->generation; | ||
1270 | |||
1271 | return init_request(client, &request, dest, p->speed); | ||
1272 | } | ||
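[Editor's note] ioctl_send_stream_packet() turns tag/channel/sy into the destination ID of an asynchronous stream packet. A sketch of the helper used above, consistent with the range checks (tag fits in 2 bits, channel in 6, sy in 4); the actual definition lives in fw-transaction.h:

/* Sketch of the helper used above; see fw-transaction.h. */
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return tag << 14 | channel << 8 | sy;
}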
1273 | |||
874 | static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { | 1274 | static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { |
875 | ioctl_get_info, | 1275 | ioctl_get_info, |
876 | ioctl_send_request, | 1276 | ioctl_send_request, |
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { | |||
885 | ioctl_start_iso, | 1285 | ioctl_start_iso, |
886 | ioctl_stop_iso, | 1286 | ioctl_stop_iso, |
887 | ioctl_get_cycle_timer, | 1287 | ioctl_get_cycle_timer, |
1288 | ioctl_allocate_iso_resource, | ||
1289 | ioctl_deallocate_iso_resource, | ||
1290 | ioctl_allocate_iso_resource_once, | ||
1291 | ioctl_deallocate_iso_resource_once, | ||
1292 | ioctl_get_speed, | ||
1293 | ioctl_send_broadcast_request, | ||
1294 | ioctl_send_stream_packet, | ||
888 | }; | 1295 | }; |
889 | 1296 | ||
890 | static int | 1297 | static int dispatch_ioctl(struct client *client, |
891 | dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) | 1298 | unsigned int cmd, void __user *arg) |
892 | { | 1299 | { |
893 | char buffer[256]; | 1300 | char buffer[256]; |
894 | int retval; | 1301 | int ret; |
895 | 1302 | ||
896 | if (_IOC_TYPE(cmd) != '#' || | 1303 | if (_IOC_TYPE(cmd) != '#' || |
897 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) | 1304 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) |
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) | |||
903 | return -EFAULT; | 1310 | return -EFAULT; |
904 | } | 1311 | } |
905 | 1312 | ||
906 | retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); | 1313 | ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); |
907 | if (retval < 0) | 1314 | if (ret < 0) |
908 | return retval; | 1315 | return ret; |
909 | 1316 | ||
910 | if (_IOC_DIR(cmd) & _IOC_READ) { | 1317 | if (_IOC_DIR(cmd) & _IOC_READ) { |
911 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1318 | if (_IOC_SIZE(cmd) > sizeof(buffer) || |
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) | |||
913 | return -EFAULT; | 1320 | return -EFAULT; |
914 | } | 1321 | } |
915 | 1322 | ||
916 | return retval; | 1323 | return ret; |
917 | } | 1324 | } |
918 | 1325 | ||
919 | static long | 1326 | static long fw_device_op_ioctl(struct file *file, |
920 | fw_device_op_ioctl(struct file *file, | 1327 | unsigned int cmd, unsigned long arg) |
921 | unsigned int cmd, unsigned long arg) | ||
922 | { | 1328 | { |
923 | struct client *client = file->private_data; | 1329 | struct client *client = file->private_data; |
924 | 1330 | ||
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file, | |||
929 | } | 1335 | } |
930 | 1336 | ||
931 | #ifdef CONFIG_COMPAT | 1337 | #ifdef CONFIG_COMPAT |
932 | static long | 1338 | static long fw_device_op_compat_ioctl(struct file *file, |
933 | fw_device_op_compat_ioctl(struct file *file, | 1339 | unsigned int cmd, unsigned long arg) |
934 | unsigned int cmd, unsigned long arg) | ||
935 | { | 1340 | { |
936 | struct client *client = file->private_data; | 1341 | struct client *client = file->private_data; |
937 | 1342 | ||
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) | |||
947 | struct client *client = file->private_data; | 1352 | struct client *client = file->private_data; |
948 | enum dma_data_direction direction; | 1353 | enum dma_data_direction direction; |
949 | unsigned long size; | 1354 | unsigned long size; |
950 | int page_count, retval; | 1355 | int page_count, ret; |
951 | 1356 | ||
952 | if (fw_device_is_shutdown(client->device)) | 1357 | if (fw_device_is_shutdown(client->device)) |
953 | return -ENODEV; | 1358 | return -ENODEV; |
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) | |||
973 | else | 1378 | else |
974 | direction = DMA_FROM_DEVICE; | 1379 | direction = DMA_FROM_DEVICE; |
975 | 1380 | ||
976 | retval = fw_iso_buffer_init(&client->buffer, client->device->card, | 1381 | ret = fw_iso_buffer_init(&client->buffer, client->device->card, |
977 | page_count, direction); | 1382 | page_count, direction); |
978 | if (retval < 0) | 1383 | if (ret < 0) |
979 | return retval; | 1384 | return ret; |
980 | 1385 | ||
981 | retval = fw_iso_buffer_map(&client->buffer, vma); | 1386 | ret = fw_iso_buffer_map(&client->buffer, vma); |
982 | if (retval < 0) | 1387 | if (ret < 0) |
983 | fw_iso_buffer_destroy(&client->buffer, client->device->card); | 1388 | fw_iso_buffer_destroy(&client->buffer, client->device->card); |
984 | 1389 | ||
985 | return retval; | 1390 | return ret; |
1391 | } | ||
1392 | |||
1393 | static int shutdown_resource(int id, void *p, void *data) | ||
1394 | { | ||
1395 | struct client_resource *r = p; | ||
1396 | struct client *client = data; | ||
1397 | |||
1398 | r->release(client, r); | ||
1399 | client_put(client); | ||
1400 | |||
1401 | return 0; | ||
986 | } | 1402 | } |
987 | 1403 | ||
988 | static int fw_device_op_release(struct inode *inode, struct file *file) | 1404 | static int fw_device_op_release(struct inode *inode, struct file *file) |
989 | { | 1405 | { |
990 | struct client *client = file->private_data; | 1406 | struct client *client = file->private_data; |
991 | struct event *e, *next_e; | 1407 | struct event *e, *next_e; |
992 | struct client_resource *r, *next_r; | ||
993 | unsigned long flags; | ||
994 | 1408 | ||
995 | if (client->buffer.pages) | 1409 | mutex_lock(&client->device->client_list_mutex); |
996 | fw_iso_buffer_destroy(&client->buffer, client->device->card); | 1410 | list_del(&client->link); |
1411 | mutex_unlock(&client->device->client_list_mutex); | ||
997 | 1412 | ||
998 | if (client->iso_context) | 1413 | if (client->iso_context) |
999 | fw_iso_context_destroy(client->iso_context); | 1414 | fw_iso_context_destroy(client->iso_context); |
1000 | 1415 | ||
1001 | list_for_each_entry_safe(r, next_r, &client->resource_list, link) | 1416 | if (client->buffer.pages) |
1002 | r->release(client, r); | 1417 | fw_iso_buffer_destroy(&client->buffer, client->device->card); |
1003 | 1418 | ||
1004 | /* | 1419 | /* Freeze client->resource_idr and client->event_list */ |
1005 | * FIXME: We should wait for the async tasklets to stop | 1420 | spin_lock_irq(&client->lock); |
1006 | * running before freeing the memory. | 1421 | client->in_shutdown = true; |
1007 | */ | 1422 | spin_unlock_irq(&client->lock); |
1423 | |||
1424 | idr_for_each(&client->resource_idr, shutdown_resource, client); | ||
1425 | idr_remove_all(&client->resource_idr); | ||
1426 | idr_destroy(&client->resource_idr); | ||
1008 | 1427 | ||
1009 | list_for_each_entry_safe(e, next_e, &client->event_list, link) | 1428 | list_for_each_entry_safe(e, next_e, &client->event_list, link) |
1010 | kfree(e); | 1429 | kfree(e); |
1011 | 1430 | ||
1012 | spin_lock_irqsave(&client->device->card->lock, flags); | 1431 | client_put(client); |
1013 | list_del(&client->link); | ||
1014 | spin_unlock_irqrestore(&client->device->card->lock, flags); | ||
1015 | |||
1016 | fw_device_put(client->device); | ||
1017 | kfree(client); | ||
1018 | 1432 | ||
1019 | return 0; | 1433 | return 0; |
1020 | } | 1434 | } |
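[Editor's note] The release path no longer frees the client directly: fw_device_op_release() ends with client_put(), and shutdown_resource() drops one reference per registered resource. That only works if the client is reference-counted; a minimal sketch of the helpers implied by the get/put pairing in this diff (kref-based, destructor name assumed):

/* Sketch of the client lifetime helpers implied by this diff. */
static void client_release(struct kref *kref)	/* name assumed */
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}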
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index bf53acb45652..a47e2129d83d 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c | |||
@@ -18,22 +18,26 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/module.h> | 21 | #include <linux/ctype.h> |
22 | #include <linux/wait.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/kthread.h> | ||
25 | #include <linux/device.h> | ||
26 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/device.h> | ||
24 | #include <linux/errno.h> | ||
27 | #include <linux/idr.h> | 25 | #include <linux/idr.h> |
28 | #include <linux/jiffies.h> | 26 | #include <linux/jiffies.h> |
29 | #include <linux/string.h> | 27 | #include <linux/kobject.h> |
28 | #include <linux/list.h> | ||
29 | #include <linux/mutex.h> | ||
30 | #include <linux/rwsem.h> | 30 | #include <linux/rwsem.h> |
31 | #include <linux/semaphore.h> | 31 | #include <linux/semaphore.h> |
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/workqueue.h> | ||
35 | |||
32 | #include <asm/system.h> | 36 | #include <asm/system.h> |
33 | #include <linux/ctype.h> | 37 | |
34 | #include "fw-transaction.h" | ||
35 | #include "fw-topology.h" | ||
36 | #include "fw-device.h" | 38 | #include "fw-device.h" |
39 | #include "fw-topology.h" | ||
40 | #include "fw-transaction.h" | ||
37 | 41 | ||
38 | void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) | 42 | void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) |
39 | { | 43 | { |
@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) | |||
132 | vendor, model, specifier_id, version); | 136 | vendor, model, specifier_id, version); |
133 | } | 137 | } |
134 | 138 | ||
135 | static int | 139 | static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) |
136 | fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
137 | { | 140 | { |
138 | struct fw_unit *unit = fw_unit(dev); | 141 | struct fw_unit *unit = fw_unit(dev); |
139 | char modalias[64]; | 142 | char modalias[64]; |
@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = { | |||
152 | }; | 155 | }; |
153 | EXPORT_SYMBOL(fw_bus_type); | 156 | EXPORT_SYMBOL(fw_bus_type); |
154 | 157 | ||
155 | static void fw_device_release(struct device *dev) | ||
156 | { | ||
157 | struct fw_device *device = fw_device(dev); | ||
158 | struct fw_card *card = device->card; | ||
159 | unsigned long flags; | ||
160 | |||
161 | /* | ||
162 | * Take the card lock so we don't set this to NULL while a | ||
163 | * FW_NODE_UPDATED callback is being handled or while the | ||
164 | * bus manager work looks at this node. | ||
165 | */ | ||
166 | spin_lock_irqsave(&card->lock, flags); | ||
167 | device->node->data = NULL; | ||
168 | spin_unlock_irqrestore(&card->lock, flags); | ||
169 | |||
170 | fw_node_put(device->node); | ||
171 | kfree(device->config_rom); | ||
172 | kfree(device); | ||
173 | fw_card_put(card); | ||
174 | } | ||
175 | |||
176 | int fw_device_enable_phys_dma(struct fw_device *device) | 158 | int fw_device_enable_phys_dma(struct fw_device *device) |
177 | { | 159 | { |
178 | int generation = device->generation; | 160 | int generation = device->generation; |
@@ -191,8 +173,8 @@ struct config_rom_attribute { | |||
191 | u32 key; | 173 | u32 key; |
192 | }; | 174 | }; |
193 | 175 | ||
194 | static ssize_t | 176 | static ssize_t show_immediate(struct device *dev, |
195 | show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) | 177 | struct device_attribute *dattr, char *buf) |
196 | { | 178 | { |
197 | struct config_rom_attribute *attr = | 179 | struct config_rom_attribute *attr = |
198 | container_of(dattr, struct config_rom_attribute, attr); | 180 | container_of(dattr, struct config_rom_attribute, attr); |
@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) | |||
223 | #define IMMEDIATE_ATTR(name, key) \ | 205 | #define IMMEDIATE_ATTR(name, key) \ |
224 | { __ATTR(name, S_IRUGO, show_immediate, NULL), key } | 206 | { __ATTR(name, S_IRUGO, show_immediate, NULL), key } |
225 | 207 | ||
226 | static ssize_t | 208 | static ssize_t show_text_leaf(struct device *dev, |
227 | show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) | 209 | struct device_attribute *dattr, char *buf) |
228 | { | 210 | { |
229 | struct config_rom_attribute *attr = | 211 | struct config_rom_attribute *attr = |
230 | container_of(dattr, struct config_rom_attribute, attr); | 212 | container_of(dattr, struct config_rom_attribute, attr); |
@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = { | |||
293 | TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), | 275 | TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), |
294 | }; | 276 | }; |
295 | 277 | ||
296 | static void | 278 | static void init_fw_attribute_group(struct device *dev, |
297 | init_fw_attribute_group(struct device *dev, | 279 | struct device_attribute *attrs, |
298 | struct device_attribute *attrs, | 280 | struct fw_attribute_group *group) |
299 | struct fw_attribute_group *group) | ||
300 | { | 281 | { |
301 | struct device_attribute *attr; | 282 | struct device_attribute *attr; |
302 | int i, j; | 283 | int i, j; |
@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev, | |||
319 | dev->groups = group->groups; | 300 | dev->groups = group->groups; |
320 | } | 301 | } |
321 | 302 | ||
322 | static ssize_t | 303 | static ssize_t modalias_show(struct device *dev, |
323 | modalias_show(struct device *dev, | 304 | struct device_attribute *attr, char *buf) |
324 | struct device_attribute *attr, char *buf) | ||
325 | { | 305 | { |
326 | struct fw_unit *unit = fw_unit(dev); | 306 | struct fw_unit *unit = fw_unit(dev); |
327 | int length; | 307 | int length; |
@@ -332,9 +312,8 @@ modalias_show(struct device *dev, | |||
332 | return length + 1; | 312 | return length + 1; |
333 | } | 313 | } |
334 | 314 | ||
335 | static ssize_t | 315 | static ssize_t rom_index_show(struct device *dev, |
336 | rom_index_show(struct device *dev, | 316 | struct device_attribute *attr, char *buf) |
337 | struct device_attribute *attr, char *buf) | ||
338 | { | 317 | { |
339 | struct fw_device *device = fw_device(dev->parent); | 318 | struct fw_device *device = fw_device(dev->parent); |
340 | struct fw_unit *unit = fw_unit(dev); | 319 | struct fw_unit *unit = fw_unit(dev); |
@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = { | |||
349 | __ATTR_NULL, | 328 | __ATTR_NULL, |
350 | }; | 329 | }; |
351 | 330 | ||
352 | static ssize_t | 331 | static ssize_t config_rom_show(struct device *dev, |
353 | config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) | 332 | struct device_attribute *attr, char *buf) |
354 | { | 333 | { |
355 | struct fw_device *device = fw_device(dev); | 334 | struct fw_device *device = fw_device(dev); |
356 | size_t length; | 335 | size_t length; |
@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
363 | return length; | 342 | return length; |
364 | } | 343 | } |
365 | 344 | ||
366 | static ssize_t | 345 | static ssize_t guid_show(struct device *dev, |
367 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 346 | struct device_attribute *attr, char *buf) |
368 | { | 347 | { |
369 | struct fw_device *device = fw_device(dev); | 348 | struct fw_device *device = fw_device(dev); |
370 | int ret; | 349 | int ret; |
@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = { | |||
383 | __ATTR_NULL, | 362 | __ATTR_NULL, |
384 | }; | 363 | }; |
385 | 364 | ||
386 | static int | 365 | static int read_rom(struct fw_device *device, |
387 | read_rom(struct fw_device *device, int generation, int index, u32 *data) | 366 | int generation, int index, u32 *data) |
388 | { | 367 | { |
389 | int rcode; | 368 | int rcode; |
390 | 369 | ||
@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation) | |||
539 | 518 | ||
540 | kfree(old_rom); | 519 | kfree(old_rom); |
541 | ret = 0; | 520 | ret = 0; |
542 | device->cmc = rom[2] & 1 << 30; | 521 | device->cmc = rom[2] >> 30 & 1; |
543 | out: | 522 | out: |
544 | kfree(rom); | 523 | kfree(rom); |
545 | 524 | ||
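[Editor's note] The small-looking change to device->cmc in read_bus_info_block() matters because of the fw-device.h hunk further down, where cmc shrinks from bool to a one-bit bitfield: assigning a masked-but-unshifted value to a :1 field keeps only bit 0. An illustrative fragment:

/* Illustrative only: why the shift-first form is needed. */
void cmc_demo(void)
{
	struct { unsigned cmc:1; } d;
	u32 rom2 = 1 << 30;	/* bus info block word with CMC bit set */

	d.cmc = rom2 & 1 << 30;	/* bit 0 of 0x40000000 -> always 0 */
	d.cmc = rom2 >> 30 & 1;	/* -> 1, as intended */
}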
@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work) | |||
679 | fw_device_put(device); | 658 | fw_device_put(device); |
680 | } | 659 | } |
681 | 660 | ||
661 | static void fw_device_release(struct device *dev) | ||
662 | { | ||
663 | struct fw_device *device = fw_device(dev); | ||
664 | struct fw_card *card = device->card; | ||
665 | unsigned long flags; | ||
666 | |||
667 | /* | ||
668 | * Take the card lock so we don't set this to NULL while a | ||
669 | * FW_NODE_UPDATED callback is being handled or while the | ||
670 | * bus manager work looks at this node. | ||
671 | */ | ||
672 | spin_lock_irqsave(&card->lock, flags); | ||
673 | device->node->data = NULL; | ||
674 | spin_unlock_irqrestore(&card->lock, flags); | ||
675 | |||
676 | fw_node_put(device->node); | ||
677 | kfree(device->config_rom); | ||
678 | kfree(device); | ||
679 | fw_card_put(card); | ||
680 | } | ||
681 | |||
682 | static struct device_type fw_device_type = { | 682 | static struct device_type fw_device_type = { |
683 | .release = fw_device_release, | 683 | .release = fw_device_release, |
684 | }; | 684 | }; |
685 | 685 | ||
686 | static void fw_device_update(struct work_struct *work); | 686 | static int update_unit(struct device *dev, void *data) |
687 | { | ||
688 | struct fw_unit *unit = fw_unit(dev); | ||
689 | struct fw_driver *driver = (struct fw_driver *)dev->driver; | ||
690 | |||
691 | if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { | ||
692 | down(&dev->sem); | ||
693 | driver->update(unit); | ||
694 | up(&dev->sem); | ||
695 | } | ||
696 | |||
697 | return 0; | ||
698 | } | ||
699 | |||
700 | static void fw_device_update(struct work_struct *work) | ||
701 | { | ||
702 | struct fw_device *device = | ||
703 | container_of(work, struct fw_device, work.work); | ||
704 | |||
705 | fw_device_cdev_update(device); | ||
706 | device_for_each_child(&device->device, NULL, update_unit); | ||
707 | } | ||
687 | 708 | ||
688 | /* | 709 | /* |
689 | * If a device was pending for deletion because its node went away but its | 710 | * If a device was pending for deletion because its node went away but its |
@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data) | |||
735 | return match; | 756 | return match; |
736 | } | 757 | } |
737 | 758 | ||
759 | enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; | ||
760 | |||
761 | void fw_device_set_broadcast_channel(struct fw_device *device, int generation) | ||
762 | { | ||
763 | struct fw_card *card = device->card; | ||
764 | __be32 data; | ||
765 | int rcode; | ||
766 | |||
767 | if (!card->broadcast_channel_allocated) | ||
768 | return; | ||
769 | |||
770 | if (device->bc_implemented == BC_UNKNOWN) { | ||
771 | rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, | ||
772 | device->node_id, generation, device->max_speed, | ||
773 | CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, | ||
774 | &data, 4); | ||
775 | switch (rcode) { | ||
776 | case RCODE_COMPLETE: | ||
777 | if (data & cpu_to_be32(1 << 31)) { | ||
778 | device->bc_implemented = BC_IMPLEMENTED; | ||
779 | break; | ||
780 | } | ||
781 | /* else fall through to case address error */ | ||
782 | case RCODE_ADDRESS_ERROR: | ||
783 | device->bc_implemented = BC_UNIMPLEMENTED; | ||
784 | } | ||
785 | } | ||
786 | |||
787 | if (device->bc_implemented == BC_IMPLEMENTED) { | ||
788 | data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | | ||
789 | BROADCAST_CHANNEL_VALID); | ||
790 | fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, | ||
791 | device->node_id, generation, device->max_speed, | ||
792 | CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, | ||
793 | &data, 4); | ||
794 | } | ||
795 | } | ||
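[Editor's note] fw_device_set_broadcast_channel() probes the BROADCAST_CHANNEL register once per device and caches the answer in the new two-bit bc_implemented field. The register layout implied by the code above; the constants live in fw-transaction.h, and the exact values shown here are an assumption:

/* Assumed BROADCAST_CHANNEL register bits, per the checks above: */
#define BROADCAST_CHANNEL_VALID		(1 << 30)	/* channel field is valid */
#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)	/* implemented, channel 31 */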
796 | |||
738 | static void fw_device_init(struct work_struct *work) | 797 | static void fw_device_init(struct work_struct *work) |
739 | { | 798 | { |
740 | struct fw_device *device = | 799 | struct fw_device *device = |
741 | container_of(work, struct fw_device, work.work); | 800 | container_of(work, struct fw_device, work.work); |
742 | struct device *revived_dev; | 801 | struct device *revived_dev; |
743 | int minor, err; | 802 | int minor, ret; |
744 | 803 | ||
745 | /* | 804 | /* |
746 | * All failure paths here set node->data to NULL, so that we | 805 | * All failure paths here set node->data to NULL, so that we |
@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work) | |||
776 | 835 | ||
777 | fw_device_get(device); | 836 | fw_device_get(device); |
778 | down_write(&fw_device_rwsem); | 837 | down_write(&fw_device_rwsem); |
779 | err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? | 838 | ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? |
780 | idr_get_new(&fw_device_idr, device, &minor) : | 839 | idr_get_new(&fw_device_idr, device, &minor) : |
781 | -ENOMEM; | 840 | -ENOMEM; |
782 | up_write(&fw_device_rwsem); | 841 | up_write(&fw_device_rwsem); |
783 | 842 | ||
784 | if (err < 0) | 843 | if (ret < 0) |
785 | goto error; | 844 | goto error; |
786 | 845 | ||
787 | device->device.bus = &fw_bus_type; | 846 | device->device.bus = &fw_bus_type; |
@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work) | |||
828 | device->config_rom[3], device->config_rom[4], | 887 | device->config_rom[3], device->config_rom[4], |
829 | 1 << device->max_speed); | 888 | 1 << device->max_speed); |
830 | device->config_rom_retries = 0; | 889 | device->config_rom_retries = 0; |
890 | |||
891 | fw_device_set_broadcast_channel(device, device->generation); | ||
831 | } | 892 | } |
832 | 893 | ||
833 | /* | 894 | /* |
@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work) | |||
851 | put_device(&device->device); /* our reference */ | 912 | put_device(&device->device); /* our reference */ |
852 | } | 913 | } |
853 | 914 | ||
854 | static int update_unit(struct device *dev, void *data) | ||
855 | { | ||
856 | struct fw_unit *unit = fw_unit(dev); | ||
857 | struct fw_driver *driver = (struct fw_driver *)dev->driver; | ||
858 | |||
859 | if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { | ||
860 | down(&dev->sem); | ||
861 | driver->update(unit); | ||
862 | up(&dev->sem); | ||
863 | } | ||
864 | |||
865 | return 0; | ||
866 | } | ||
867 | |||
868 | static void fw_device_update(struct work_struct *work) | ||
869 | { | ||
870 | struct fw_device *device = | ||
871 | container_of(work, struct fw_device, work.work); | ||
872 | |||
873 | fw_device_cdev_update(device); | ||
874 | device_for_each_child(&device->device, NULL, update_unit); | ||
875 | } | ||
876 | |||
877 | enum { | 915 | enum { |
878 | REREAD_BIB_ERROR, | 916 | REREAD_BIB_ERROR, |
879 | REREAD_BIB_GONE, | 917 | REREAD_BIB_GONE, |
@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation) | |||
894 | if (i == 0 && q == 0) | 932 | if (i == 0 && q == 0) |
895 | return REREAD_BIB_GONE; | 933 | return REREAD_BIB_GONE; |
896 | 934 | ||
897 | if (i > device->config_rom_length || q != device->config_rom[i]) | 935 | if (q != device->config_rom[i]) |
898 | return REREAD_BIB_CHANGED; | 936 | return REREAD_BIB_CHANGED; |
899 | } | 937 | } |
900 | 938 | ||
@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
1004 | device->node = fw_node_get(node); | 1042 | device->node = fw_node_get(node); |
1005 | device->node_id = node->node_id; | 1043 | device->node_id = node->node_id; |
1006 | device->generation = card->generation; | 1044 | device->generation = card->generation; |
1045 | mutex_init(&device->client_list_mutex); | ||
1007 | INIT_LIST_HEAD(&device->client_list); | 1046 | INIT_LIST_HEAD(&device->client_list); |
1008 | 1047 | ||
1009 | /* | 1048 | /* |
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 8ef6ec2ca21c..97588937c018 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h | |||
@@ -19,10 +19,17 @@ | |||
19 | #ifndef __fw_device_h | 19 | #ifndef __fw_device_h |
20 | #define __fw_device_h | 20 | #define __fw_device_h |
21 | 21 | ||
22 | #include <linux/device.h> | ||
22 | #include <linux/fs.h> | 23 | #include <linux/fs.h> |
23 | #include <linux/cdev.h> | ||
24 | #include <linux/idr.h> | 24 | #include <linux/idr.h> |
25 | #include <linux/kernel.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/mutex.h> | ||
25 | #include <linux/rwsem.h> | 28 | #include <linux/rwsem.h> |
29 | #include <linux/sysfs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | |||
26 | #include <asm/atomic.h> | 33 | #include <asm/atomic.h> |
27 | 34 | ||
28 | enum fw_device_state { | 35 | enum fw_device_state { |
@@ -38,6 +45,9 @@ struct fw_attribute_group { | |||
38 | struct attribute *attrs[11]; | 45 | struct attribute *attrs[11]; |
39 | }; | 46 | }; |
40 | 47 | ||
48 | struct fw_node; | ||
49 | struct fw_card; | ||
50 | |||
41 | /* | 51 | /* |
42 | * Note, fw_device.generation always has to be read before fw_device.node_id. | 52 | * Note, fw_device.generation always has to be read before fw_device.node_id. |
43 | * Use SMP memory barriers to ensure this. Otherwise requests will be sent | 53 | * Use SMP memory barriers to ensure this. Otherwise requests will be sent |
@@ -61,13 +71,18 @@ struct fw_device { | |||
61 | int node_id; | 71 | int node_id; |
62 | int generation; | 72 | int generation; |
63 | unsigned max_speed; | 73 | unsigned max_speed; |
64 | bool cmc; | ||
65 | struct fw_card *card; | 74 | struct fw_card *card; |
66 | struct device device; | 75 | struct device device; |
76 | |||
77 | struct mutex client_list_mutex; | ||
67 | struct list_head client_list; | 78 | struct list_head client_list; |
79 | |||
68 | u32 *config_rom; | 80 | u32 *config_rom; |
69 | size_t config_rom_length; | 81 | size_t config_rom_length; |
70 | int config_rom_retries; | 82 | int config_rom_retries; |
83 | unsigned cmc:1; | ||
84 | unsigned bc_implemented:2; | ||
85 | |||
71 | struct delayed_work work; | 86 | struct delayed_work work; |
72 | struct fw_attribute_group attribute_group; | 87 | struct fw_attribute_group attribute_group; |
73 | }; | 88 | }; |
@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device) | |||
96 | 111 | ||
97 | struct fw_device *fw_device_get_by_devt(dev_t devt); | 112 | struct fw_device *fw_device_get_by_devt(dev_t devt); |
98 | int fw_device_enable_phys_dma(struct fw_device *device); | 113 | int fw_device_enable_phys_dma(struct fw_device *device); |
114 | void fw_device_set_broadcast_channel(struct fw_device *device, int generation); | ||
99 | 115 | ||
100 | void fw_device_cdev_update(struct fw_device *device); | 116 | void fw_device_cdev_update(struct fw_device *device); |
101 | void fw_device_cdev_remove(struct fw_device *device); | 117 | void fw_device_cdev_remove(struct fw_device *device); |
@@ -176,8 +192,7 @@ struct fw_driver { | |||
176 | const struct fw_device_id *id_table; | 192 | const struct fw_device_id *id_table; |
177 | }; | 193 | }; |
178 | 194 | ||
179 | static inline struct fw_driver * | 195 | static inline struct fw_driver *fw_driver(struct device_driver *drv) |
180 | fw_driver(struct device_driver *drv) | ||
181 | { | 196 | { |
182 | return container_of(drv, struct fw_driver, driver); | 197 | return container_of(drv, struct fw_driver, driver); |
183 | } | 198 | } |
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index e14c03dc0065..2baf1007253e 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Isochronous IO functionality | 2 | * Isochronous I/O functionality: |
3 | * - Isochronous DMA context management | ||
4 | * - Isochronous bus resource management (channels, bandwidth), client side | ||
3 | * | 5 | * |
4 | * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> | 6 | * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * | 7 | * |
@@ -18,21 +20,25 @@ | |||
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 20 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 21 | */ |
20 | 22 | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/dma-mapping.h> | 23 | #include <linux/dma-mapping.h> |
24 | #include <linux/vmalloc.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/firewire-constants.h> | ||
26 | #include <linux/kernel.h> | ||
25 | #include <linux/mm.h> | 27 | #include <linux/mm.h> |
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/vmalloc.h> | ||
26 | 30 | ||
27 | #include "fw-transaction.h" | ||
28 | #include "fw-topology.h" | 31 | #include "fw-topology.h" |
29 | #include "fw-device.h" | 32 | #include "fw-transaction.h" |
30 | 33 | ||
31 | int | 34 | /* |
32 | fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | 35 | * Isochronous DMA context management |
33 | int page_count, enum dma_data_direction direction) | 36 | */ |
37 | |||
38 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | ||
39 | int page_count, enum dma_data_direction direction) | ||
34 | { | 40 | { |
35 | int i, j, retval = -ENOMEM; | 41 | int i, j; |
36 | dma_addr_t address; | 42 | dma_addr_t address; |
37 | 43 | ||
38 | buffer->page_count = page_count; | 44 | buffer->page_count = page_count; |
@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | |||
69 | kfree(buffer->pages); | 75 | kfree(buffer->pages); |
70 | out: | 76 | out: |
71 | buffer->pages = NULL; | 77 | buffer->pages = NULL; |
72 | return retval; | 78 | |
79 | return -ENOMEM; | ||
73 | } | 80 | } |
74 | 81 | ||
75 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) | 82 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) |
76 | { | 83 | { |
77 | unsigned long uaddr; | 84 | unsigned long uaddr; |
78 | int i, retval; | 85 | int i, err; |
79 | 86 | ||
80 | uaddr = vma->vm_start; | 87 | uaddr = vma->vm_start; |
81 | for (i = 0; i < buffer->page_count; i++) { | 88 | for (i = 0; i < buffer->page_count; i++) { |
82 | retval = vm_insert_page(vma, uaddr, buffer->pages[i]); | 89 | err = vm_insert_page(vma, uaddr, buffer->pages[i]); |
83 | if (retval) | 90 | if (err) |
84 | return retval; | 91 | return err; |
92 | |||
85 | uaddr += PAGE_SIZE; | 93 | uaddr += PAGE_SIZE; |
86 | } | 94 | } |
87 | 95 | ||
@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, | |||
105 | buffer->pages = NULL; | 113 | buffer->pages = NULL; |
106 | } | 114 | } |
107 | 115 | ||
108 | struct fw_iso_context * | 116 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
109 | fw_iso_context_create(struct fw_card *card, int type, | 117 | int type, int channel, int speed, size_t header_size, |
110 | int channel, int speed, size_t header_size, | 118 | fw_iso_callback_t callback, void *callback_data) |
111 | fw_iso_callback_t callback, void *callback_data) | ||
112 | { | 119 | { |
113 | struct fw_iso_context *ctx; | 120 | struct fw_iso_context *ctx; |
114 | 121 | ||
115 | ctx = card->driver->allocate_iso_context(card, type, header_size); | 122 | ctx = card->driver->allocate_iso_context(card, |
123 | type, channel, header_size); | ||
116 | if (IS_ERR(ctx)) | 124 | if (IS_ERR(ctx)) |
117 | return ctx; | 125 | return ctx; |
118 | 126 | ||
@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx) | |||
134 | card->driver->free_iso_context(ctx); | 142 | card->driver->free_iso_context(ctx); |
135 | } | 143 | } |
136 | 144 | ||
137 | int | 145 | int fw_iso_context_start(struct fw_iso_context *ctx, |
138 | fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) | 146 | int cycle, int sync, int tags) |
139 | { | 147 | { |
140 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); | 148 | return ctx->card->driver->start_iso(ctx, cycle, sync, tags); |
141 | } | 149 | } |
142 | 150 | ||
143 | int | 151 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
144 | fw_iso_context_queue(struct fw_iso_context *ctx, | 152 | struct fw_iso_packet *packet, |
145 | struct fw_iso_packet *packet, | 153 | struct fw_iso_buffer *buffer, |
146 | struct fw_iso_buffer *buffer, | 154 | unsigned long payload) |
147 | unsigned long payload) | ||
148 | { | 155 | { |
149 | struct fw_card *card = ctx->card; | 156 | struct fw_card *card = ctx->card; |
150 | 157 | ||
151 | return card->driver->queue_iso(ctx, packet, buffer, payload); | 158 | return card->driver->queue_iso(ctx, packet, buffer, payload); |
152 | } | 159 | } |
153 | 160 | ||
154 | int | 161 | int fw_iso_context_stop(struct fw_iso_context *ctx) |
155 | fw_iso_context_stop(struct fw_iso_context *ctx) | ||
156 | { | 162 | { |
157 | return ctx->card->driver->stop_iso(ctx); | 163 | return ctx->card->driver->stop_iso(ctx); |
158 | } | 164 | } |
165 | |||
166 | /* | ||
167 | * Isochronous bus resource management (channels, bandwidth), client side | ||
168 | */ | ||
169 | |||
170 | static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, | ||
171 | int bandwidth, bool allocate) | ||
172 | { | ||
173 | __be32 data[2]; | ||
174 | int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; | ||
175 | |||
176 | /* | ||
177 | * On a 1394a IRM with low contention, try < 1 is enough. | ||
178 | * On a 1394-1995 IRM, we need at least try < 2. | ||
179 | * Let's just do try < 5. | ||
180 | */ | ||
181 | for (try = 0; try < 5; try++) { | ||
182 | new = allocate ? old - bandwidth : old + bandwidth; | ||
183 | if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) | ||
184 | break; | ||
185 | |||
186 | data[0] = cpu_to_be32(old); | ||
187 | data[1] = cpu_to_be32(new); | ||
188 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | ||
189 | irm_id, generation, SCODE_100, | ||
190 | CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, | ||
191 | data, sizeof(data))) { | ||
192 | case RCODE_GENERATION: | ||
193 | /* A generation change frees all bandwidth. */ | ||
194 | return allocate ? -EAGAIN : bandwidth; | ||
195 | |||
196 | case RCODE_COMPLETE: | ||
197 | if (be32_to_cpup(data) == old) | ||
198 | return bandwidth; | ||
199 | |||
200 | old = be32_to_cpup(data); | ||
201 | /* Fall through. */ | ||
202 | } | ||
203 | } | ||
204 | |||
205 | return -EIO; | ||
206 | } | ||
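[Editor's note] manage_bandwidth() negotiates with the IRM via lock-compare-swap: it proposes (old, new), and if another node raced, the value returned by the lock becomes the next old. A worked example, assuming BANDWIDTH_AVAILABLE_INITIAL is the IEEE 1394 reset value of 4915 allocation units:

/*
 * Worked example of the compare-swap loop (allocating 800 units):
 *
 *   try 0: old = 4915, propose new = 4115
 *          -> lock returns 4515 (another node took 400 first)
 *   try 1: old = 4515, propose new = 3715
 *          -> lock returns 4515 == old: allocation of 800 succeeded
 */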
207 | |||
208 | static int manage_channel(struct fw_card *card, int irm_id, int generation, | ||
209 | u32 channels_mask, u64 offset, bool allocate) | ||
210 | { | ||
211 | __be32 data[2], c, all, old; | ||
212 | int i, retry = 5; | ||
213 | |||
214 | old = all = allocate ? cpu_to_be32(~0) : 0; | ||
215 | |||
216 | for (i = 0; i < 32; i++) { | ||
217 | if (!(channels_mask & 1 << i)) | ||
218 | continue; | ||
219 | |||
220 | c = cpu_to_be32(1 << (31 - i)); | ||
221 | if ((old & c) != (all & c)) | ||
222 | continue; | ||
223 | |||
224 | data[0] = old; | ||
225 | data[1] = old ^ c; | ||
226 | switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, | ||
227 | irm_id, generation, SCODE_100, | ||
228 | offset, data, sizeof(data))) { | ||
229 | case RCODE_GENERATION: | ||
230 | /* A generation change frees all channels. */ | ||
231 | return allocate ? -EAGAIN : i; | ||
232 | |||
233 | case RCODE_COMPLETE: | ||
234 | if (data[0] == old) | ||
235 | return i; | ||
236 | |||
237 | old = data[0]; | ||
238 | |||
239 | /* Is the IRM 1394a-2000 compliant? */ | ||
240 | if ((data[0] & c) == (data[1] & c)) | ||
241 | continue; | ||
242 | |||
243 | /* 1394-1995 IRM, fall through to retry. */ | ||
244 | default: | ||
245 | if (retry--) | ||
246 | i--; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | return -EIO; | ||
251 | } | ||
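[Editor's note] manage_channel() bridges two bit orders: the caller's mask uses bit 0 for channel 0, while the IRM's CHANNELS_AVAILABLE register puts channel 0 in the MSB, hence the 1 << (31 - i). An illustrative helper (not part of the driver) that performs the same flip on a whole mask:

/* Illustration of the bit-order flip handled one bit at a time above. */
static u32 host_mask_to_register_mask(u32 channels_mask)
{
	u32 reg = 0;
	int i;

	for (i = 0; i < 32; i++)
		if (channels_mask & 1 << i)
			reg |= 1U << (31 - i);	/* MSB = channel 0 */

	return reg;
}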
252 | |||
253 | static void deallocate_channel(struct fw_card *card, int irm_id, | ||
254 | int generation, int channel) | ||
255 | { | ||
256 | u32 mask; | ||
257 | u64 offset; | ||
258 | |||
259 | mask = channel < 32 ? 1 << channel : 1 << (channel - 32); | ||
260 | offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : | ||
261 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; | ||
262 | |||
263 | manage_channel(card, irm_id, generation, mask, offset, false); | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth | ||
268 | * | ||
269 | * In parameters: card, generation, channels_mask, bandwidth, allocate | ||
270 | * Out parameters: channel, bandwidth | ||
271 | * This function blocks (sleeps) during communication with the IRM. | ||
272 | * | ||
273 | * Allocates or deallocates at most one channel out of channels_mask. | ||
274 | * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. | ||
275 | * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for | ||
276 | * channel 0 and LSB for channel 63.) | ||
277 | * Allocates or deallocates as many bandwidth allocation units as specified. | ||
278 | * | ||
279 | * Returns channel < 0 if no channel was allocated or deallocated. | ||
280 | * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. | ||
281 | * | ||
282 | * If generation is stale, deallocations succeed but allocations fail with | ||
283 | * channel = -EAGAIN. | ||
284 | * | ||
285 | * If channel allocation fails, no bandwidth will be allocated either. | ||
286 | * If bandwidth allocation fails, no channel will be allocated either. | ||
287 | * But deallocations of channel and bandwidth are tried independently | ||
288 | * of each other's success. | ||
289 | */ | ||
290 | void fw_iso_resource_manage(struct fw_card *card, int generation, | ||
291 | u64 channels_mask, int *channel, int *bandwidth, | ||
292 | bool allocate) | ||
293 | { | ||
294 | u32 channels_hi = channels_mask; /* channels 31...0 */ | ||
295 | u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ | ||
296 | int irm_id, ret, c = -EINVAL; | ||
297 | |||
298 | spin_lock_irq(&card->lock); | ||
299 | irm_id = card->irm_node->node_id; | ||
300 | spin_unlock_irq(&card->lock); | ||
301 | |||
302 | if (channels_hi) | ||
303 | c = manage_channel(card, irm_id, generation, channels_hi, | ||
304 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); | ||
305 | if (channels_lo && c < 0) { | ||
306 | c = manage_channel(card, irm_id, generation, channels_lo, | ||
307 | CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); | ||
308 | if (c >= 0) | ||
309 | c += 32; | ||
310 | } | ||
311 | *channel = c; | ||
312 | |||
313 | if (allocate && channels_mask != 0 && c < 0) | ||
314 | *bandwidth = 0; | ||
315 | |||
316 | if (*bandwidth == 0) | ||
317 | return; | ||
318 | |||
319 | ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); | ||
320 | if (ret < 0) | ||
321 | *bandwidth = 0; | ||
322 | |||
323 | if (allocate && ret < 0 && c >= 0) { | ||
324 | deallocate_channel(card, irm_id, generation, c); | ||
325 | *channel = ret; | ||
326 | } | ||
327 | } | ||
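To make the in/out-parameter contract above concrete, here is a hedged caller-side sketch. It is not from this patch; error handling is abbreviated and it leans only on the semantics stated in the kernel-doc comment.

	/* Try to claim any one of channels 0..7 plus 2000 bandwidth
	 * allocation units. */
	int channel = -1;	/* out: claimed channel, or < 0 */
	int bandwidth = 2000;	/* in/out: requested, then actual units */

	fw_iso_resource_manage(card, generation, 0xffULL,
			       &channel, &bandwidth, true);
	if (channel < 0)
		/* Also covers a failed bandwidth allocation: per the
		 * comment above, the channel was rolled back and
		 * channel now holds the error (-EAGAIN if the
		 * generation went stale). */
		return channel;

	/* ... run isochronous I/O ... */

	/* Release both; deallocations are attempted independently. */
	bandwidth = 2000;
	fw_iso_resource_manage(card, generation, 1ULL << channel,
			       &channel, &bandwidth, false);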
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 6d19828a93a5..1180d0be0bb4 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c | |||
@@ -205,6 +205,7 @@ struct fw_ohci { | |||
205 | 205 | ||
206 | u32 it_context_mask; | 206 | u32 it_context_mask; |
207 | struct iso_context *it_context_list; | 207 | struct iso_context *it_context_list; |
208 | u64 ir_context_channels; | ||
208 | u32 ir_context_mask; | 209 | u32 ir_context_mask; |
209 | struct iso_context *ir_context_list; | 210 | struct iso_context *ir_context_list; |
210 | }; | 211 | }; |
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci) | |||
441 | reg_read(ohci, OHCI1394_Version); | 442 | reg_read(ohci, OHCI1394_Version); |
442 | } | 443 | } |
443 | 444 | ||
444 | static int | 445 | static int ohci_update_phy_reg(struct fw_card *card, int addr, |
445 | ohci_update_phy_reg(struct fw_card *card, int addr, | 446 | int clear_bits, int set_bits) |
446 | int clear_bits, int set_bits) | ||
447 | { | 447 | { |
448 | struct fw_ohci *ohci = fw_ohci(card); | 448 | struct fw_ohci *ohci = fw_ohci(card); |
449 | u32 val, old; | 449 | u32 val, old; |
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data) | |||
658 | } | 658 | } |
659 | } | 659 | } |
660 | 660 | ||
661 | static int | 661 | static int ar_context_init(struct ar_context *ctx, |
662 | ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) | 662 | struct fw_ohci *ohci, u32 regs) |
663 | { | 663 | { |
664 | struct ar_buffer ab; | 664 | struct ar_buffer ab; |
665 | 665 | ||
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx) | |||
690 | flush_writes(ctx->ohci); | 690 | flush_writes(ctx->ohci); |
691 | } | 691 | } |
692 | 692 | ||
693 | static struct descriptor * | 693 | static struct descriptor *find_branch_descriptor(struct descriptor *d, int z) |
694 | find_branch_descriptor(struct descriptor *d, int z) | ||
695 | { | 694 | { |
696 | int b, key; | 695 | int b, key; |
697 | 696 | ||
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data) | |||
751 | * Allocate a new buffer and add it to the list of free buffers for this | 750 | * Allocate a new buffer and add it to the list of free buffers for this |
752 | * context. Must be called with ohci->lock held. | 751 | * context. Must be called with ohci->lock held. |
753 | */ | 752 | */ |
754 | static int | 753 | static int context_add_buffer(struct context *ctx) |
755 | context_add_buffer(struct context *ctx) | ||
756 | { | 754 | { |
757 | struct descriptor_buffer *desc; | 755 | struct descriptor_buffer *desc; |
758 | dma_addr_t uninitialized_var(bus_addr); | 756 | dma_addr_t uninitialized_var(bus_addr); |
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx) | |||
781 | return 0; | 779 | return 0; |
782 | } | 780 | } |
783 | 781 | ||
784 | static int | 782 | static int context_init(struct context *ctx, struct fw_ohci *ohci, |
785 | context_init(struct context *ctx, struct fw_ohci *ohci, | 783 | u32 regs, descriptor_callback_t callback) |
786 | u32 regs, descriptor_callback_t callback) | ||
787 | { | 784 | { |
788 | ctx->ohci = ohci; | 785 | ctx->ohci = ohci; |
789 | ctx->regs = regs; | 786 | ctx->regs = regs; |
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci, | |||
814 | return 0; | 811 | return 0; |
815 | } | 812 | } |
816 | 813 | ||
817 | static void | 814 | static void context_release(struct context *ctx) |
818 | context_release(struct context *ctx) | ||
819 | { | 815 | { |
820 | struct fw_card *card = &ctx->ohci->card; | 816 | struct fw_card *card = &ctx->ohci->card; |
821 | struct descriptor_buffer *desc, *tmp; | 817 | struct descriptor_buffer *desc, *tmp; |
@@ -827,8 +823,8 @@ context_release(struct context *ctx) | |||
827 | } | 823 | } |
828 | 824 | ||
829 | /* Must be called with ohci->lock held */ | 825 | /* Must be called with ohci->lock held */ |
830 | static struct descriptor * | 826 | static struct descriptor *context_get_descriptors(struct context *ctx, |
831 | context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) | 827 | int z, dma_addr_t *d_bus) |
832 | { | 828 | { |
833 | struct descriptor *d = NULL; | 829 | struct descriptor *d = NULL; |
834 | struct descriptor_buffer *desc = ctx->buffer_tail; | 830 | struct descriptor_buffer *desc = ctx->buffer_tail; |
@@ -912,8 +908,8 @@ struct driver_data { | |||
912 | * Must always be called with the ohci->lock held to ensure proper | 908 | * Must always be called with the ohci->lock held to ensure proper |
913 | * generation handling and locking around packet queue manipulation. | 909 | * generation handling and locking around packet queue manipulation. |
914 | */ | 910 | */ |
915 | static int | 911 | static int at_context_queue_packet(struct context *ctx, |
916 | at_context_queue_packet(struct context *ctx, struct fw_packet *packet) | 912 | struct fw_packet *packet) |
917 | { | 913 | { |
918 | struct fw_ohci *ohci = ctx->ohci; | 914 | struct fw_ohci *ohci = ctx->ohci; |
919 | dma_addr_t d_bus, uninitialized_var(payload_bus); | 915 | dma_addr_t d_bus, uninitialized_var(payload_bus); |
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) | |||
940 | */ | 936 | */ |
941 | 937 | ||
942 | header = (__le32 *) &d[1]; | 938 | header = (__le32 *) &d[1]; |
943 | if (packet->header_length > 8) { | 939 | switch (packet->header_length) { |
940 | case 16: | ||
941 | case 12: | ||
944 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | 942 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | |
945 | (packet->speed << 16)); | 943 | (packet->speed << 16)); |
946 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | | 944 | header[1] = cpu_to_le32((packet->header[1] & 0xffff) | |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) | |||
954 | header[3] = (__force __le32) packet->header[3]; | 952 | header[3] = (__force __le32) packet->header[3]; |
955 | 953 | ||
956 | d[0].req_count = cpu_to_le16(packet->header_length); | 954 | d[0].req_count = cpu_to_le16(packet->header_length); |
957 | } else { | 955 | break; |
956 | |||
957 | case 8: | ||
958 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | | 958 | header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | |
959 | (packet->speed << 16)); | 959 | (packet->speed << 16)); |
960 | header[1] = cpu_to_le32(packet->header[0]); | 960 | header[1] = cpu_to_le32(packet->header[0]); |
961 | header[2] = cpu_to_le32(packet->header[1]); | 961 | header[2] = cpu_to_le32(packet->header[1]); |
962 | d[0].req_count = cpu_to_le16(12); | 962 | d[0].req_count = cpu_to_le16(12); |
963 | break; | ||
964 | |||
965 | case 4: | ||
966 | header[0] = cpu_to_le32((packet->header[0] & 0xffff) | | ||
967 | (packet->speed << 16)); | ||
968 | header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); | ||
969 | d[0].req_count = cpu_to_le16(8); | ||
970 | break; | ||
971 | |||
972 | default: | ||
973 | /* BUG(); */ | ||
974 | packet->ack = RCODE_SEND_ERROR; | ||
975 | return -1; | ||
963 | } | 976 | } |
964 | 977 | ||
965 | driver_data = (struct driver_data *) &d[3]; | 978 | driver_data = (struct driver_data *) &d[3]; |
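The switch replaces the old "header_length > 8" heuristic with the header sizes that are actually legal on the AT DMA path. A hedged summary, based only on the cases visible above (the 4-byte case pairs with the TCODE_STREAM_DATA support added to fw_fill_request later in this patch):

	/*
	 * header_length   meaning                  immediate data written
	 *
	 * 16 or 12        regular request or       header quadlets copied,
	 *                 response                 req_count = header_length
	 * 8               PHY packet               OHCI1394_phy_tcode quadlet
	 *                                          plus two payload quadlets,
	 *                                          req_count = 12
	 * 4               stream data header       the single input quadlet
	 *                 (TCODE_STREAM_DATA)      split across header[0..1],
	 *                                          req_count = 8
	 * anything else   rejected: packet->ack = RCODE_SEND_ERROR,
	 *                 return -1 instead of silently mis-sending
	 */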
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context, | |||
1095 | #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) | 1108 | #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) |
1096 | #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) | 1109 | #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) |
1097 | 1110 | ||
1098 | static void | 1111 | static void handle_local_rom(struct fw_ohci *ohci, |
1099 | handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) | 1112 | struct fw_packet *packet, u32 csr) |
1100 | { | 1113 | { |
1101 | struct fw_packet response; | 1114 | struct fw_packet response; |
1102 | int tcode, length, i; | 1115 | int tcode, length, i; |
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) | |||
1122 | fw_core_handle_response(&ohci->card, &response); | 1135 | fw_core_handle_response(&ohci->card, &response); |
1123 | } | 1136 | } |
1124 | 1137 | ||
1125 | static void | 1138 | static void handle_local_lock(struct fw_ohci *ohci, |
1126 | handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) | 1139 | struct fw_packet *packet, u32 csr) |
1127 | { | 1140 | { |
1128 | struct fw_packet response; | 1141 | struct fw_packet response; |
1129 | int tcode, length, ext_tcode, sel; | 1142 | int tcode, length, ext_tcode, sel; |
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) | |||
1164 | fw_core_handle_response(&ohci->card, &response); | 1177 | fw_core_handle_response(&ohci->card, &response); |
1165 | } | 1178 | } |
1166 | 1179 | ||
1167 | static void | 1180 | static void handle_local_request(struct context *ctx, struct fw_packet *packet) |
1168 | handle_local_request(struct context *ctx, struct fw_packet *packet) | ||
1169 | { | 1181 | { |
1170 | u64 offset; | 1182 | u64 offset; |
1171 | u32 csr; | 1183 | u32 csr; |
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet) | |||
1205 | } | 1217 | } |
1206 | } | 1218 | } |
1207 | 1219 | ||
1208 | static void | 1220 | static void at_context_transmit(struct context *ctx, struct fw_packet *packet) |
1209 | at_context_transmit(struct context *ctx, struct fw_packet *packet) | ||
1210 | { | 1221 | { |
1211 | unsigned long flags; | 1222 | unsigned long flags; |
1212 | int retval; | 1223 | int ret; |
1213 | 1224 | ||
1214 | spin_lock_irqsave(&ctx->ohci->lock, flags); | 1225 | spin_lock_irqsave(&ctx->ohci->lock, flags); |
1215 | 1226 | ||
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet) | |||
1220 | return; | 1231 | return; |
1221 | } | 1232 | } |
1222 | 1233 | ||
1223 | retval = at_context_queue_packet(ctx, packet); | 1234 | ret = at_context_queue_packet(ctx, packet); |
1224 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); | 1235 | spin_unlock_irqrestore(&ctx->ohci->lock, flags); |
1225 | 1236 | ||
1226 | if (retval < 0) | 1237 | if (ret < 0) |
1227 | packet->callback(packet, &ctx->ohci->card, packet->ack); | 1238 | packet->callback(packet, &ctx->ohci->card, packet->ack); |
1228 | 1239 | ||
1229 | } | 1240 | } |
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) | |||
1590 | return 0; | 1601 | return 0; |
1591 | } | 1602 | } |
1592 | 1603 | ||
1593 | static int | 1604 | static int ohci_set_config_rom(struct fw_card *card, |
1594 | ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | 1605 | u32 *config_rom, size_t length) |
1595 | { | 1606 | { |
1596 | struct fw_ohci *ohci; | 1607 | struct fw_ohci *ohci; |
1597 | unsigned long flags; | 1608 | unsigned long flags; |
1598 | int retval = -EBUSY; | 1609 | int ret = -EBUSY; |
1599 | __be32 *next_config_rom; | 1610 | __be32 *next_config_rom; |
1600 | dma_addr_t uninitialized_var(next_config_rom_bus); | 1611 | dma_addr_t uninitialized_var(next_config_rom_bus); |
1601 | 1612 | ||
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | |||
1649 | 1660 | ||
1650 | reg_write(ohci, OHCI1394_ConfigROMmap, | 1661 | reg_write(ohci, OHCI1394_ConfigROMmap, |
1651 | ohci->next_config_rom_bus); | 1662 | ohci->next_config_rom_bus); |
1652 | retval = 0; | 1663 | ret = 0; |
1653 | } | 1664 | } |
1654 | 1665 | ||
1655 | spin_unlock_irqrestore(&ohci->lock, flags); | 1666 | spin_unlock_irqrestore(&ohci->lock, flags); |
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | |||
1661 | * controller could need to access it before the bus reset | 1672 | * controller could need to access it before the bus reset |
1662 | * takes effect. | 1673 | * takes effect. |
1663 | */ | 1674 | */ |
1664 | if (retval == 0) | 1675 | if (ret == 0) |
1665 | fw_core_initiate_bus_reset(&ohci->card, 1); | 1676 | fw_core_initiate_bus_reset(&ohci->card, 1); |
1666 | else | 1677 | else |
1667 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, | 1678 | dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, |
1668 | next_config_rom, next_config_rom_bus); | 1679 | next_config_rom, next_config_rom_bus); |
1669 | 1680 | ||
1670 | return retval; | 1681 | return ret; |
1671 | } | 1682 | } |
1672 | 1683 | ||
1673 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) | 1684 | static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) |
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) | |||
1689 | struct fw_ohci *ohci = fw_ohci(card); | 1700 | struct fw_ohci *ohci = fw_ohci(card); |
1690 | struct context *ctx = &ohci->at_request_ctx; | 1701 | struct context *ctx = &ohci->at_request_ctx; |
1691 | struct driver_data *driver_data = packet->driver_data; | 1702 | struct driver_data *driver_data = packet->driver_data; |
1692 | int retval = -ENOENT; | 1703 | int ret = -ENOENT; |
1693 | 1704 | ||
1694 | tasklet_disable(&ctx->tasklet); | 1705 | tasklet_disable(&ctx->tasklet); |
1695 | 1706 | ||
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) | |||
1704 | driver_data->packet = NULL; | 1715 | driver_data->packet = NULL; |
1705 | packet->ack = RCODE_CANCELLED; | 1716 | packet->ack = RCODE_CANCELLED; |
1706 | packet->callback(packet, &ohci->card, packet->ack); | 1717 | packet->callback(packet, &ohci->card, packet->ack); |
1707 | retval = 0; | 1718 | ret = 0; |
1708 | |||
1709 | out: | 1719 | out: |
1710 | tasklet_enable(&ctx->tasklet); | 1720 | tasklet_enable(&ctx->tasklet); |
1711 | 1721 | ||
1712 | return retval; | 1722 | return ret; |
1713 | } | 1723 | } |
1714 | 1724 | ||
1715 | static int | 1725 | static int ohci_enable_phys_dma(struct fw_card *card, |
1716 | ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) | 1726 | int node_id, int generation) |
1717 | { | 1727 | { |
1718 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA | 1728 | #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA |
1719 | return 0; | 1729 | return 0; |
1720 | #else | 1730 | #else |
1721 | struct fw_ohci *ohci = fw_ohci(card); | 1731 | struct fw_ohci *ohci = fw_ohci(card); |
1722 | unsigned long flags; | 1732 | unsigned long flags; |
1723 | int n, retval = 0; | 1733 | int n, ret = 0; |
1724 | 1734 | ||
1725 | /* | 1735 | /* |
1726 | * FIXME: Make sure this bitmask is cleared when we clear the busReset | 1736 | * FIXME: Make sure this bitmask is cleared when we clear the busReset |
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) | |||
1730 | spin_lock_irqsave(&ohci->lock, flags); | 1740 | spin_lock_irqsave(&ohci->lock, flags); |
1731 | 1741 | ||
1732 | if (ohci->generation != generation) { | 1742 | if (ohci->generation != generation) { |
1733 | retval = -ESTALE; | 1743 | ret = -ESTALE; |
1734 | goto out; | 1744 | goto out; |
1735 | } | 1745 | } |
1736 | 1746 | ||
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) | |||
1748 | flush_writes(ohci); | 1758 | flush_writes(ohci); |
1749 | out: | 1759 | out: |
1750 | spin_unlock_irqrestore(&ohci->lock, flags); | 1760 | spin_unlock_irqrestore(&ohci->lock, flags); |
1751 | return retval; | 1761 | |
1762 | return ret; | ||
1752 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ | 1763 | #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ |
1753 | } | 1764 | } |
1754 | 1765 | ||
1755 | static u64 | 1766 | static u64 ohci_get_bus_time(struct fw_card *card) |
1756 | ohci_get_bus_time(struct fw_card *card) | ||
1757 | { | 1767 | { |
1758 | struct fw_ohci *ohci = fw_ohci(card); | 1768 | struct fw_ohci *ohci = fw_ohci(card); |
1759 | u32 cycle_time; | 1769 | u32 cycle_time; |
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card) | |||
1765 | return bus_time; | 1775 | return bus_time; |
1766 | } | 1776 | } |
1767 | 1777 | ||
1778 | static void copy_iso_headers(struct iso_context *ctx, void *p) | ||
1779 | { | ||
1780 | int i = ctx->header_length; | ||
1781 | |||
1782 | if (i + ctx->base.header_size > PAGE_SIZE) | ||
1783 | return; | ||
1784 | |||
1785 | /* | ||
1786 | * The iso header is byteswapped to little endian by | ||
1787 | * the controller, but the remaining header quadlets | ||
1788 | * are big endian. We want to present all the headers | ||
1789 | * as big endian, so we have to swap the first quadlet. | ||
1790 | */ | ||
1791 | if (ctx->base.header_size > 0) | ||
1792 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); | ||
1793 | if (ctx->base.header_size > 4) | ||
1794 | *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); | ||
1795 | if (ctx->base.header_size > 8) | ||
1796 | memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); | ||
1797 | ctx->header_length += ctx->base.header_size; | ||
1798 | } | ||
1799 | |||
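copy_iso_headers() centralizes what the two IR handlers previously open-coded. A hedged byte-order example of the 8-byte case follows; the identification of p + 4 as the iso packet header is grounded in the dualbuffer handler below, which reads the data_length field from that quadlet, while the quadlet at p is presumably the xferStatus/timeStamp trailer.

	/*
	 * Hedged worked example, header_size == 8:
	 *
	 * buffer as written by the controller (at p):
	 *   p + 0 .. 3   second controller-written quadlet, little
	 *                endian (presumably the status/timestamp
	 *                trailer)
	 *   p + 4 .. 7   iso packet header, byteswapped to little
	 *                endian by the host controller
	 *
	 * presented to the client (at ctx->header + i), big endian:
	 *   + 0 .. 3     swab32(quadlet at p + 4)   iso header first
	 *   + 4 .. 7     swab32(quadlet at p + 0)
	 *
	 * With header_size == 4 only the iso header is copied; beyond
	 * 8, the quadlets at p + 8... are already big endian and are
	 * copied verbatim.
	 */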
1768 | static int handle_ir_dualbuffer_packet(struct context *context, | 1800 | static int handle_ir_dualbuffer_packet(struct context *context, |
1769 | struct descriptor *d, | 1801 | struct descriptor *d, |
1770 | struct descriptor *last) | 1802 | struct descriptor *last) |
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1775 | __le32 *ir_header; | 1807 | __le32 *ir_header; |
1776 | size_t header_length; | 1808 | size_t header_length; |
1777 | void *p, *end; | 1809 | void *p, *end; |
1778 | int i; | ||
1779 | 1810 | ||
1780 | if (db->first_res_count != 0 && db->second_res_count != 0) { | 1811 | if (db->first_res_count != 0 && db->second_res_count != 0) { |
1781 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { | 1812 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { |
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1788 | header_length = le16_to_cpu(db->first_req_count) - | 1819 | header_length = le16_to_cpu(db->first_req_count) - |
1789 | le16_to_cpu(db->first_res_count); | 1820 | le16_to_cpu(db->first_res_count); |
1790 | 1821 | ||
1791 | i = ctx->header_length; | ||
1792 | p = db + 1; | 1822 | p = db + 1; |
1793 | end = p + header_length; | 1823 | end = p + header_length; |
1794 | while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { | 1824 | while (p < end) { |
1795 | /* | 1825 | copy_iso_headers(ctx, p); |
1796 | * The iso header is byteswapped to little endian by | ||
1797 | * the controller, but the remaining header quadlets | ||
1798 | * are big endian. We want to present all the headers | ||
1799 | * as big endian, so we have to swap the first | ||
1800 | * quadlet. | ||
1801 | */ | ||
1802 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); | ||
1803 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); | ||
1804 | i += ctx->base.header_size; | ||
1805 | ctx->excess_bytes += | 1826 | ctx->excess_bytes += |
1806 | (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; | 1827 | (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; |
1807 | p += ctx->base.header_size + 4; | 1828 | p += max(ctx->base.header_size, (size_t)8); |
1808 | } | 1829 | } |
1809 | ctx->header_length = i; | ||
1810 | 1830 | ||
1811 | ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - | 1831 | ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - |
1812 | le16_to_cpu(db->second_res_count); | 1832 | le16_to_cpu(db->second_res_count); |
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
1832 | struct descriptor *pd; | 1852 | struct descriptor *pd; |
1833 | __le32 *ir_header; | 1853 | __le32 *ir_header; |
1834 | void *p; | 1854 | void *p; |
1835 | int i; | ||
1836 | 1855 | ||
1837 | for (pd = d; pd <= last; pd++) { | 1856 | for (pd = d; pd <= last; pd++) { |
1838 | if (pd->transfer_status) | 1857 | if (pd->transfer_status) |
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context, | |||
1842 | /* Descriptor(s) not done yet, stop iteration */ | 1861 | /* Descriptor(s) not done yet, stop iteration */ |
1843 | return 0; | 1862 | return 0; |
1844 | 1863 | ||
1845 | i = ctx->header_length; | 1864 | p = last + 1; |
1846 | p = last + 1; | 1865 | copy_iso_headers(ctx, p); |
1847 | |||
1848 | if (ctx->base.header_size > 0 && | ||
1849 | i + ctx->base.header_size <= PAGE_SIZE) { | ||
1850 | /* | ||
1851 | * The iso header is byteswapped to little endian by | ||
1852 | * the controller, but the remaining header quadlets | ||
1853 | * are big endian. We want to present all the headers | ||
1854 | * as big endian, so we have to swap the first quadlet. | ||
1855 | */ | ||
1856 | *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); | ||
1857 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); | ||
1858 | ctx->header_length += ctx->base.header_size; | ||
1859 | } | ||
1860 | 1866 | ||
1861 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { | 1867 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { |
1862 | ir_header = (__le32 *) p; | 1868 | ir_header = (__le32 *) p; |
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context, | |||
1888 | return 1; | 1894 | return 1; |
1889 | } | 1895 | } |
1890 | 1896 | ||
1891 | static struct fw_iso_context * | 1897 | static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, |
1892 | ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) | 1898 | int type, int channel, size_t header_size) |
1893 | { | 1899 | { |
1894 | struct fw_ohci *ohci = fw_ohci(card); | 1900 | struct fw_ohci *ohci = fw_ohci(card); |
1895 | struct iso_context *ctx, *list; | 1901 | struct iso_context *ctx, *list; |
1896 | descriptor_callback_t callback; | 1902 | descriptor_callback_t callback; |
1903 | u64 *channels, dont_care = ~0ULL; | ||
1897 | u32 *mask, regs; | 1904 | u32 *mask, regs; |
1898 | unsigned long flags; | 1905 | unsigned long flags; |
1899 | int index, retval = -ENOMEM; | 1906 | int index, ret = -ENOMEM; |
1900 | 1907 | ||
1901 | if (type == FW_ISO_CONTEXT_TRANSMIT) { | 1908 | if (type == FW_ISO_CONTEXT_TRANSMIT) { |
1909 | channels = &dont_care; | ||
1902 | mask = &ohci->it_context_mask; | 1910 | mask = &ohci->it_context_mask; |
1903 | list = ohci->it_context_list; | 1911 | list = ohci->it_context_list; |
1904 | callback = handle_it_packet; | 1912 | callback = handle_it_packet; |
1905 | } else { | 1913 | } else { |
1914 | channels = &ohci->ir_context_channels; | ||
1906 | mask = &ohci->ir_context_mask; | 1915 | mask = &ohci->ir_context_mask; |
1907 | list = ohci->ir_context_list; | 1916 | list = ohci->ir_context_list; |
1908 | if (ohci->use_dualbuffer) | 1917 | if (ohci->use_dualbuffer) |
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) | |||
1912 | } | 1921 | } |
1913 | 1922 | ||
1914 | spin_lock_irqsave(&ohci->lock, flags); | 1923 | spin_lock_irqsave(&ohci->lock, flags); |
1915 | index = ffs(*mask) - 1; | 1924 | index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; |
1916 | if (index >= 0) | 1925 | if (index >= 0) { |
1926 | *channels &= ~(1ULL << channel); | ||
1917 | *mask &= ~(1 << index); | 1927 | *mask &= ~(1 << index); |
1928 | } | ||
1918 | spin_unlock_irqrestore(&ohci->lock, flags); | 1929 | spin_unlock_irqrestore(&ohci->lock, flags); |
1919 | 1930 | ||
1920 | if (index < 0) | 1931 | if (index < 0) |
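One subtlety in the claim above is worth spelling out. A hedged, fully parenthesized reading:

	/*
	 * C precedence makes the claim above equivalent to:
	 *
	 *   index = (*channels & (1ULL << channel)) ?
	 *						ffs(*mask) - 1 : -1;
	 *
	 * i.e. an IR context is handed out only while the channel is
	 * still unclaimed (IT contexts pass the dont_care mask, so any
	 * channel qualifies).  The channel bit and the context bit are
	 * cleared together under ohci->lock, so a second receive
	 * context on the same channel sees index == -1 and the
	 * allocation fails.
	 */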
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) | |||
1932 | if (ctx->header == NULL) | 1943 | if (ctx->header == NULL) |
1933 | goto out; | 1944 | goto out; |
1934 | 1945 | ||
1935 | retval = context_init(&ctx->context, ohci, regs, callback); | 1946 | ret = context_init(&ctx->context, ohci, regs, callback); |
1936 | if (retval < 0) | 1947 | if (ret < 0) |
1937 | goto out_with_header; | 1948 | goto out_with_header; |
1938 | 1949 | ||
1939 | return &ctx->base; | 1950 | return &ctx->base; |
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) | |||
1945 | *mask |= 1 << index; | 1956 | *mask |= 1 << index; |
1946 | spin_unlock_irqrestore(&ohci->lock, flags); | 1957 | spin_unlock_irqrestore(&ohci->lock, flags); |
1947 | 1958 | ||
1948 | return ERR_PTR(retval); | 1959 | return ERR_PTR(ret); |
1949 | } | 1960 | } |
1950 | 1961 | ||
1951 | static int ohci_start_iso(struct fw_iso_context *base, | 1962 | static int ohci_start_iso(struct fw_iso_context *base, |
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base) | |||
2024 | } else { | 2035 | } else { |
2025 | index = ctx - ohci->ir_context_list; | 2036 | index = ctx - ohci->ir_context_list; |
2026 | ohci->ir_context_mask |= 1 << index; | 2037 | ohci->ir_context_mask |= 1 << index; |
2038 | ohci->ir_context_channels |= 1ULL << base->channel; | ||
2027 | } | 2039 | } |
2028 | 2040 | ||
2029 | spin_unlock_irqrestore(&ohci->lock, flags); | 2041 | spin_unlock_irqrestore(&ohci->lock, flags); |
2030 | } | 2042 | } |
2031 | 2043 | ||
2032 | static int | 2044 | static int ohci_queue_iso_transmit(struct fw_iso_context *base, |
2033 | ohci_queue_iso_transmit(struct fw_iso_context *base, | 2045 | struct fw_iso_packet *packet, |
2034 | struct fw_iso_packet *packet, | 2046 | struct fw_iso_buffer *buffer, |
2035 | struct fw_iso_buffer *buffer, | 2047 | unsigned long payload) |
2036 | unsigned long payload) | ||
2037 | { | 2048 | { |
2038 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2049 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2039 | struct descriptor *d, *last, *pd; | 2050 | struct descriptor *d, *last, *pd; |
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base, | |||
2128 | return 0; | 2139 | return 0; |
2129 | } | 2140 | } |
2130 | 2141 | ||
2131 | static int | 2142 | static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, |
2132 | ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | 2143 | struct fw_iso_packet *packet, |
2133 | struct fw_iso_packet *packet, | 2144 | struct fw_iso_buffer *buffer, |
2134 | struct fw_iso_buffer *buffer, | 2145 | unsigned long payload) |
2135 | unsigned long payload) | ||
2136 | { | 2146 | { |
2137 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2147 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2138 | struct db_descriptor *db = NULL; | 2148 | struct db_descriptor *db = NULL; |
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | |||
2151 | z = 2; | 2161 | z = 2; |
2152 | 2162 | ||
2153 | /* | 2163 | /* |
2154 | * The OHCI controller puts the status word in the header | 2164 | * The OHCI controller puts the isochronous header and trailer in the |
2155 | * buffer too, so we need 4 extra bytes per packet. | 2165 | * buffer, so we need at least 8 bytes. |
2156 | */ | 2166 | */ |
2157 | packet_count = p->header_length / ctx->base.header_size; | 2167 | packet_count = p->header_length / ctx->base.header_size; |
2158 | header_size = packet_count * (ctx->base.header_size + 4); | 2168 | header_size = packet_count * max(ctx->base.header_size, (size_t)8); |
2159 | 2169 | ||
2160 | /* Get header size in number of descriptors. */ | 2170 | /* Get header size in number of descriptors. */ |
2161 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); | 2171 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); |
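The header accounting changes from "client header size plus a 4-byte status word" to "at least the 8-byte header/trailer pair". A quick arithmetic check, hedged as this editor's reading of the change:

	/*
	 * Bytes reserved in the header buffer per queued packet:
	 *
	 *   header_size   old: header_size + 4   new: max(header_size, 8)
	 *        4                8                       8
	 *        8               12                       8
	 *       12               16                      12
	 *
	 * header_size as seen by the client now already counts the
	 * trailer quadlet that copy_iso_headers() stores at offset 4,
	 * which appears to be why the separate "+ 4" is dropped: it
	 * would double-count that quadlet.
	 */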
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | |||
2173 | db = (struct db_descriptor *) d; | 2183 | db = (struct db_descriptor *) d; |
2174 | db->control = cpu_to_le16(DESCRIPTOR_STATUS | | 2184 | db->control = cpu_to_le16(DESCRIPTOR_STATUS | |
2175 | DESCRIPTOR_BRANCH_ALWAYS); | 2185 | DESCRIPTOR_BRANCH_ALWAYS); |
2176 | db->first_size = cpu_to_le16(ctx->base.header_size + 4); | 2186 | db->first_size = |
2187 | cpu_to_le16(max(ctx->base.header_size, (size_t)8)); | ||
2177 | if (p->skip && rest == p->payload_length) { | 2188 | if (p->skip && rest == p->payload_length) { |
2178 | db->control |= cpu_to_le16(DESCRIPTOR_WAIT); | 2189 | db->control |= cpu_to_le16(DESCRIPTOR_WAIT); |
2179 | db->first_req_count = db->first_size; | 2190 | db->first_req_count = db->first_size; |
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | |||
2208 | return 0; | 2219 | return 0; |
2209 | } | 2220 | } |
2210 | 2221 | ||
2211 | static int | 2222 | static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, |
2212 | ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | 2223 | struct fw_iso_packet *packet, |
2213 | struct fw_iso_packet *packet, | 2224 | struct fw_iso_buffer *buffer, |
2214 | struct fw_iso_buffer *buffer, | 2225 | unsigned long payload) |
2215 | unsigned long payload) | ||
2216 | { | 2226 | { |
2217 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2227 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2218 | struct descriptor *d = NULL, *pd = NULL; | 2228 | struct descriptor *d = NULL, *pd = NULL; |
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2223 | int page, offset, packet_count, header_size, payload_per_buffer; | 2233 | int page, offset, packet_count, header_size, payload_per_buffer; |
2224 | 2234 | ||
2225 | /* | 2235 | /* |
2226 | * The OHCI controller puts the status word in the | 2236 | * The OHCI controller puts the isochronous header and trailer in the |
2227 | * buffer too, so we need 4 extra bytes per packet. | 2237 | * buffer, so we need at least 8 bytes. |
2228 | */ | 2238 | */ |
2229 | packet_count = p->header_length / ctx->base.header_size; | 2239 | packet_count = p->header_length / ctx->base.header_size; |
2230 | header_size = ctx->base.header_size + 4; | 2240 | header_size = max(ctx->base.header_size, (size_t)8); |
2231 | 2241 | ||
2232 | /* Get header size in number of descriptors. */ | 2242 | /* Get header size in number of descriptors. */ |
2233 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); | 2243 | header_z = DIV_ROUND_UP(header_size, sizeof(*d)); |
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | |||
2286 | return 0; | 2296 | return 0; |
2287 | } | 2297 | } |
2288 | 2298 | ||
2289 | static int | 2299 | static int ohci_queue_iso(struct fw_iso_context *base, |
2290 | ohci_queue_iso(struct fw_iso_context *base, | 2300 | struct fw_iso_packet *packet, |
2291 | struct fw_iso_packet *packet, | 2301 | struct fw_iso_buffer *buffer, |
2292 | struct fw_iso_buffer *buffer, | 2302 | unsigned long payload) |
2293 | unsigned long payload) | ||
2294 | { | 2303 | { |
2295 | struct iso_context *ctx = container_of(base, struct iso_context, base); | 2304 | struct iso_context *ctx = container_of(base, struct iso_context, base); |
2296 | unsigned long flags; | 2305 | unsigned long flags; |
2297 | int retval; | 2306 | int ret; |
2298 | 2307 | ||
2299 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); | 2308 | spin_lock_irqsave(&ctx->context.ohci->lock, flags); |
2300 | if (base->type == FW_ISO_CONTEXT_TRANSMIT) | 2309 | if (base->type == FW_ISO_CONTEXT_TRANSMIT) |
2301 | retval = ohci_queue_iso_transmit(base, packet, buffer, payload); | 2310 | ret = ohci_queue_iso_transmit(base, packet, buffer, payload); |
2302 | else if (ctx->context.ohci->use_dualbuffer) | 2311 | else if (ctx->context.ohci->use_dualbuffer) |
2303 | retval = ohci_queue_iso_receive_dualbuffer(base, packet, | 2312 | ret = ohci_queue_iso_receive_dualbuffer(base, packet, |
2304 | buffer, payload); | 2313 | buffer, payload); |
2305 | else | 2314 | else |
2306 | retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, | 2315 | ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, |
2307 | buffer, | 2316 | buffer, payload); |
2308 | payload); | ||
2309 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); | 2317 | spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); |
2310 | 2318 | ||
2311 | return retval; | 2319 | return ret; |
2312 | } | 2320 | } |
2313 | 2321 | ||
2314 | static const struct fw_card_driver ohci_driver = { | 2322 | static const struct fw_card_driver ohci_driver = { |
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev) | |||
2357 | #define ohci_pmac_off(dev) | 2365 | #define ohci_pmac_off(dev) |
2358 | #endif /* CONFIG_PPC_PMAC */ | 2366 | #endif /* CONFIG_PPC_PMAC */ |
2359 | 2367 | ||
2360 | static int __devinit | 2368 | static int __devinit pci_probe(struct pci_dev *dev, |
2361 | pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | 2369 | const struct pci_device_id *ent) |
2362 | { | 2370 | { |
2363 | struct fw_ohci *ohci; | 2371 | struct fw_ohci *ohci; |
2364 | u32 bus_options, max_receive, link_speed, version; | 2372 | u32 bus_options, max_receive, link_speed, version; |
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2440 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); | 2448 | ohci->it_context_list = kzalloc(size, GFP_KERNEL); |
2441 | 2449 | ||
2442 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); | 2450 | reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); |
2451 | ohci->ir_context_channels = ~0ULL; | ||
2443 | ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); | 2452 | ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); |
2444 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); | 2453 | reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); |
2445 | size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); | 2454 | size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); |
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2467 | reg_read(ohci, OHCI1394_GUIDLo); | 2476 | reg_read(ohci, OHCI1394_GUIDLo); |
2468 | 2477 | ||
2469 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); | 2478 | err = fw_card_add(&ohci->card, max_receive, link_speed, guid); |
2470 | if (err < 0) | 2479 | if (err) |
2471 | goto fail_self_id; | 2480 | goto fail_self_id; |
2472 | 2481 | ||
2473 | fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", | 2482 | fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", |
2474 | dev_name(&dev->dev), version >> 16, version & 0xff); | 2483 | dev_name(&dev->dev), version >> 16, version & 0xff); |
2484 | |||
2475 | return 0; | 2485 | return 0; |
2476 | 2486 | ||
2477 | fail_self_id: | 2487 | fail_self_id: |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index c71c4419d9e8..2bcf51557c72 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c | |||
@@ -392,20 +392,18 @@ static const struct { | |||
392 | } | 392 | } |
393 | }; | 393 | }; |
394 | 394 | ||
395 | static void | 395 | static void free_orb(struct kref *kref) |
396 | free_orb(struct kref *kref) | ||
397 | { | 396 | { |
398 | struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); | 397 | struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); |
399 | 398 | ||
400 | kfree(orb); | 399 | kfree(orb); |
401 | } | 400 | } |
402 | 401 | ||
403 | static void | 402 | static void sbp2_status_write(struct fw_card *card, struct fw_request *request, |
404 | sbp2_status_write(struct fw_card *card, struct fw_request *request, | 403 | int tcode, int destination, int source, |
405 | int tcode, int destination, int source, | 404 | int generation, int speed, |
406 | int generation, int speed, | 405 | unsigned long long offset, |
407 | unsigned long long offset, | 406 | void *payload, size_t length, void *callback_data) |
408 | void *payload, size_t length, void *callback_data) | ||
409 | { | 407 | { |
410 | struct sbp2_logical_unit *lu = callback_data; | 408 | struct sbp2_logical_unit *lu = callback_data; |
411 | struct sbp2_orb *orb; | 409 | struct sbp2_orb *orb; |
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request, | |||
451 | fw_send_response(card, request, RCODE_COMPLETE); | 449 | fw_send_response(card, request, RCODE_COMPLETE); |
452 | } | 450 | } |
453 | 451 | ||
454 | static void | 452 | static void complete_transaction(struct fw_card *card, int rcode, |
455 | complete_transaction(struct fw_card *card, int rcode, | 453 | void *payload, size_t length, void *data) |
456 | void *payload, size_t length, void *data) | ||
457 | { | 454 | { |
458 | struct sbp2_orb *orb = data; | 455 | struct sbp2_orb *orb = data; |
459 | unsigned long flags; | 456 | unsigned long flags; |
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode, | |||
482 | kref_put(&orb->kref, free_orb); | 479 | kref_put(&orb->kref, free_orb); |
483 | } | 480 | } |
484 | 481 | ||
485 | static void | 482 | static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, |
486 | sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, | 483 | int node_id, int generation, u64 offset) |
487 | int node_id, int generation, u64 offset) | ||
488 | { | 484 | { |
489 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 485 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
490 | unsigned long flags; | 486 | unsigned long flags; |
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) | |||
531 | return retval; | 527 | return retval; |
532 | } | 528 | } |
533 | 529 | ||
534 | static void | 530 | static void complete_management_orb(struct sbp2_orb *base_orb, |
535 | complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | 531 | struct sbp2_status *status) |
536 | { | 532 | { |
537 | struct sbp2_management_orb *orb = | 533 | struct sbp2_management_orb *orb = |
538 | container_of(base_orb, struct sbp2_management_orb, base); | 534 | container_of(base_orb, struct sbp2_management_orb, base); |
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
542 | complete(&orb->done); | 538 | complete(&orb->done); |
543 | } | 539 | } |
544 | 540 | ||
545 | static int | 541 | static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, |
546 | sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | 542 | int generation, int function, |
547 | int generation, int function, int lun_or_login_id, | 543 | int lun_or_login_id, void *response) |
548 | void *response) | ||
549 | { | 544 | { |
550 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 545 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
551 | struct sbp2_management_orb *orb; | 546 | struct sbp2_management_orb *orb; |
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) | |||
652 | &d, sizeof(d)); | 647 | &d, sizeof(d)); |
653 | } | 648 | } |
654 | 649 | ||
655 | static void | 650 | static void complete_agent_reset_write_no_wait(struct fw_card *card, |
656 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | 651 | int rcode, void *payload, size_t length, void *data) |
657 | void *payload, size_t length, void *data) | ||
658 | { | 652 | { |
659 | kfree(data); | 653 | kfree(data); |
660 | } | 654 | } |
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device, | |||
1299 | sizeof(orb->page_table), DMA_TO_DEVICE); | 1293 | sizeof(orb->page_table), DMA_TO_DEVICE); |
1300 | } | 1294 | } |
1301 | 1295 | ||
1302 | static unsigned int | 1296 | static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) |
1303 | sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) | ||
1304 | { | 1297 | { |
1305 | int sam_status; | 1298 | int sam_status; |
1306 | 1299 | ||
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) | |||
1337 | } | 1330 | } |
1338 | } | 1331 | } |
1339 | 1332 | ||
1340 | static void | 1333 | static void complete_command_orb(struct sbp2_orb *base_orb, |
1341 | complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | 1334 | struct sbp2_status *status) |
1342 | { | 1335 | { |
1343 | struct sbp2_command_orb *orb = | 1336 | struct sbp2_command_orb *orb = |
1344 | container_of(base_orb, struct sbp2_command_orb, base); | 1337 | container_of(base_orb, struct sbp2_command_orb, base); |
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1384 | orb->done(orb->cmd); | 1377 | orb->done(orb->cmd); |
1385 | } | 1378 | } |
1386 | 1379 | ||
1387 | static int | 1380 | static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, |
1388 | sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, | 1381 | struct fw_device *device, struct sbp2_logical_unit *lu) |
1389 | struct sbp2_logical_unit *lu) | ||
1390 | { | 1382 | { |
1391 | struct scatterlist *sg = scsi_sglist(orb->cmd); | 1383 | struct scatterlist *sg = scsi_sglist(orb->cmd); |
1392 | int i, n; | 1384 | int i, n; |
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1584 | * This is the concatenation of target port identifier and logical unit | 1576 | * This is the concatenation of target port identifier and logical unit |
1585 | * identifier as per SAM-2...SAM-4 annex A. | 1577 | * identifier as per SAM-2...SAM-4 annex A. |
1586 | */ | 1578 | */ |
1587 | static ssize_t | 1579 | static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, |
1588 | sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, | 1580 | struct device_attribute *attr, char *buf) |
1589 | char *buf) | ||
1590 | { | 1581 | { |
1591 | struct scsi_device *sdev = to_scsi_device(dev); | 1582 | struct scsi_device *sdev = to_scsi_device(dev); |
1592 | struct sbp2_logical_unit *lu; | 1583 | struct sbp2_logical_unit *lu; |
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 8dd6703b55cd..d0deecc4de93 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card, | |||
314 | struct fw_node * node, | 314 | struct fw_node * node, |
315 | struct fw_node * parent); | 315 | struct fw_node * parent); |
316 | 316 | ||
317 | static void | 317 | static void for_each_fw_node(struct fw_card *card, struct fw_node *root, |
318 | for_each_fw_node(struct fw_card *card, struct fw_node *root, | 318 | fw_node_callback_t callback) |
319 | fw_node_callback_t callback) | ||
320 | { | 319 | { |
321 | struct list_head list; | 320 | struct list_head list; |
322 | struct fw_node *node, *next, *child, *parent; | 321 | struct fw_node *node, *next, *child, *parent; |
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root, | |||
349 | fw_node_put(node); | 348 | fw_node_put(node); |
350 | } | 349 | } |
351 | 350 | ||
352 | static void | 351 | static void report_lost_node(struct fw_card *card, |
353 | report_lost_node(struct fw_card *card, | 352 | struct fw_node *node, struct fw_node *parent) |
354 | struct fw_node *node, struct fw_node *parent) | ||
355 | { | 353 | { |
356 | fw_node_event(card, node, FW_NODE_DESTROYED); | 354 | fw_node_event(card, node, FW_NODE_DESTROYED); |
357 | fw_node_put(node); | 355 | fw_node_put(node); |
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card, | |||
360 | card->bm_retries = 0; | 358 | card->bm_retries = 0; |
361 | } | 359 | } |
362 | 360 | ||
363 | static void | 361 | static void report_found_node(struct fw_card *card, |
364 | report_found_node(struct fw_card *card, | 362 | struct fw_node *node, struct fw_node *parent) |
365 | struct fw_node *node, struct fw_node *parent) | ||
366 | { | 363 | { |
367 | int b_path = (node->phy_speed == SCODE_BETA); | 364 | int b_path = (node->phy_speed == SCODE_BETA); |
368 | 365 | ||
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) | |||
415 | * found, lost or updated. Update the nodes in the card topology tree | 412 | * found, lost or updated. Update the nodes in the card topology tree |
416 | * as we go. | 413 | * as we go. |
417 | */ | 414 | */ |
418 | static void | 415 | static void update_tree(struct fw_card *card, struct fw_node *root) |
419 | update_tree(struct fw_card *card, struct fw_node *root) | ||
420 | { | 416 | { |
421 | struct list_head list0, list1; | 417 | struct list_head list0, list1; |
422 | struct fw_node *node0, *node1, *next1; | 418 | struct fw_node *node0, *node1, *next1; |
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root) | |||
497 | } | 493 | } |
498 | } | 494 | } |
499 | 495 | ||
500 | static void | 496 | static void update_topology_map(struct fw_card *card, |
501 | update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) | 497 | u32 *self_ids, int self_id_count) |
502 | { | 498 | { |
503 | int node_count; | 499 | int node_count; |
504 | 500 | ||
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) | |||
510 | fw_compute_block_crc(card->topology_map); | 506 | fw_compute_block_crc(card->topology_map); |
511 | } | 507 | } |
512 | 508 | ||
513 | void | 509 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, |
514 | fw_core_handle_bus_reset(struct fw_card *card, | 510 | int self_id_count, u32 *self_ids) |
515 | int node_id, int generation, | ||
516 | int self_id_count, u32 * self_ids) | ||
517 | { | 511 | { |
518 | struct fw_node *local_node; | 512 | struct fw_node *local_node; |
519 | unsigned long flags; | 513 | unsigned long flags; |
@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card, | |||
532 | 526 | ||
533 | spin_lock_irqsave(&card->lock, flags); | 527 | spin_lock_irqsave(&card->lock, flags); |
534 | 528 | ||
529 | card->broadcast_channel_allocated = false; | ||
535 | card->node_id = node_id; | 530 | card->node_id = node_id; |
536 | /* | 531 | /* |
537 | * Update node_id before generation to prevent anybody from using | 532 | * Update node_id before generation to prevent anybody from using |
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h index addb9f8ea776..3c497bb4fae4 100644 --- a/drivers/firewire/fw-topology.h +++ b/drivers/firewire/fw-topology.h | |||
@@ -19,6 +19,11 @@ | |||
19 | #ifndef __fw_topology_h | 19 | #ifndef __fw_topology_h |
20 | #define __fw_topology_h | 20 | #define __fw_topology_h |
21 | 21 | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/slab.h> | ||
24 | |||
25 | #include <asm/atomic.h> | ||
26 | |||
22 | enum { | 27 | enum { |
23 | FW_NODE_CREATED, | 28 | FW_NODE_CREATED, |
24 | FW_NODE_UPDATED, | 29 | FW_NODE_UPDATED, |
@@ -51,26 +56,22 @@ struct fw_node { | |||
51 | struct fw_node *ports[0]; | 56 | struct fw_node *ports[0]; |
52 | }; | 57 | }; |
53 | 58 | ||
54 | static inline struct fw_node * | 59 | static inline struct fw_node *fw_node_get(struct fw_node *node) |
55 | fw_node_get(struct fw_node *node) | ||
56 | { | 60 | { |
57 | atomic_inc(&node->ref_count); | 61 | atomic_inc(&node->ref_count); |
58 | 62 | ||
59 | return node; | 63 | return node; |
60 | } | 64 | } |
61 | 65 | ||
62 | static inline void | 66 | static inline void fw_node_put(struct fw_node *node) |
63 | fw_node_put(struct fw_node *node) | ||
64 | { | 67 | { |
65 | if (atomic_dec_and_test(&node->ref_count)) | 68 | if (atomic_dec_and_test(&node->ref_count)) |
66 | kfree(node); | 69 | kfree(node); |
67 | } | 70 | } |
68 | 71 | ||
69 | void | 72 | struct fw_card; |
70 | fw_destroy_nodes(struct fw_card *card); | 73 | void fw_destroy_nodes(struct fw_card *card); |
71 | |||
72 | int | ||
73 | fw_compute_block_crc(u32 *block); | ||
74 | 74 | ||
75 | int fw_compute_block_crc(u32 *block); | ||
75 | 76 | ||
76 | #endif /* __fw_topology_h */ | 77 | #endif /* __fw_topology_h */ |
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 699ac041f39a..283dac6d327d 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c | |||
@@ -64,10 +64,8 @@ | |||
64 | #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) | 64 | #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) |
65 | #define PHY_IDENTIFIER(id) ((id) << 30) | 65 | #define PHY_IDENTIFIER(id) ((id) << 30) |
66 | 66 | ||
67 | static int | 67 | static int close_transaction(struct fw_transaction *transaction, |
68 | close_transaction(struct fw_transaction *transaction, | 68 | struct fw_card *card, int rcode) |
69 | struct fw_card *card, int rcode, | ||
70 | u32 *payload, size_t length) | ||
71 | { | 69 | { |
72 | struct fw_transaction *t; | 70 | struct fw_transaction *t; |
73 | unsigned long flags; | 71 | unsigned long flags; |
@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction, | |||
83 | spin_unlock_irqrestore(&card->lock, flags); | 81 | spin_unlock_irqrestore(&card->lock, flags); |
84 | 82 | ||
85 | if (&t->link != &card->transaction_list) { | 83 | if (&t->link != &card->transaction_list) { |
86 | t->callback(card, rcode, payload, length, t->callback_data); | 84 | t->callback(card, rcode, NULL, 0, t->callback_data); |
87 | return 0; | 85 | return 0; |
88 | } | 86 | } |
89 | 87 | ||
@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction, | |||
94 | * Only valid for transactions that are potentially pending (i.e. have | 92 |
95 | * been sent). | 93 | * been sent). |
96 | */ | 94 | */ |
97 | int | 95 | int fw_cancel_transaction(struct fw_card *card, |
98 | fw_cancel_transaction(struct fw_card *card, | 96 | struct fw_transaction *transaction) |
99 | struct fw_transaction *transaction) | ||
100 | { | 97 | { |
101 | /* | 98 | /* |
102 | * Cancel the packet transmission if it's still queued. That | 99 | * Cancel the packet transmission if it's still queued. That |
@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card, | |||
112 | * if the transaction is still pending and remove it in that case. | 109 | * if the transaction is still pending and remove it in that case. |
113 | */ | 110 | */ |
114 | 111 | ||
115 | return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); | 112 | return close_transaction(transaction, card, RCODE_CANCELLED); |
116 | } | 113 | } |
117 | EXPORT_SYMBOL(fw_cancel_transaction); | 114 | EXPORT_SYMBOL(fw_cancel_transaction); |
118 | 115 | ||
119 | static void | 116 | static void transmit_complete_callback(struct fw_packet *packet, |
120 | transmit_complete_callback(struct fw_packet *packet, | 117 | struct fw_card *card, int status) |
121 | struct fw_card *card, int status) | ||
122 | { | 118 | { |
123 | struct fw_transaction *t = | 119 | struct fw_transaction *t = |
124 | container_of(packet, struct fw_transaction, packet); | 120 | container_of(packet, struct fw_transaction, packet); |
125 | 121 | ||
126 | switch (status) { | 122 | switch (status) { |
127 | case ACK_COMPLETE: | 123 | case ACK_COMPLETE: |
128 | close_transaction(t, card, RCODE_COMPLETE, NULL, 0); | 124 | close_transaction(t, card, RCODE_COMPLETE); |
129 | break; | 125 | break; |
130 | case ACK_PENDING: | 126 | case ACK_PENDING: |
131 | t->timestamp = packet->timestamp; | 127 | t->timestamp = packet->timestamp; |
@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet, | |||
133 | case ACK_BUSY_X: | 129 | case ACK_BUSY_X: |
134 | case ACK_BUSY_A: | 130 | case ACK_BUSY_A: |
135 | case ACK_BUSY_B: | 131 | case ACK_BUSY_B: |
136 | close_transaction(t, card, RCODE_BUSY, NULL, 0); | 132 | close_transaction(t, card, RCODE_BUSY); |
137 | break; | 133 | break; |
138 | case ACK_DATA_ERROR: | 134 | case ACK_DATA_ERROR: |
139 | close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); | 135 | close_transaction(t, card, RCODE_DATA_ERROR); |
140 | break; | 136 | break; |
141 | case ACK_TYPE_ERROR: | 137 | case ACK_TYPE_ERROR: |
142 | close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); | 138 | close_transaction(t, card, RCODE_TYPE_ERROR); |
143 | break; | 139 | break; |
144 | default: | 140 | default: |
145 | /* | 141 | /* |
146 | * In this case the ack is really a juju-specific | 142 |
147 | * rcode, so just forward that to the callback. | 143 | * rcode, so just forward that to the callback. |
148 | */ | 144 | */ |
149 | close_transaction(t, card, status, NULL, 0); | 145 | close_transaction(t, card, status); |
150 | break; | 146 | break; |
151 | } | 147 | } |
152 | } | 148 | } |
153 | 149 | ||
154 | static void | 150 | static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, |
155 | fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | ||
156 | int destination_id, int source_id, int generation, int speed, | 151 | int destination_id, int source_id, int generation, int speed, |
157 | unsigned long long offset, void *payload, size_t length) | 152 | unsigned long long offset, void *payload, size_t length) |
158 | { | 153 | { |
159 | int ext_tcode; | 154 | int ext_tcode; |
160 | 155 | ||
156 | if (tcode == TCODE_STREAM_DATA) { | ||
157 | packet->header[0] = | ||
158 | HEADER_DATA_LENGTH(length) | | ||
159 | destination_id | | ||
160 | HEADER_TCODE(TCODE_STREAM_DATA); | ||
161 | packet->header_length = 4; | ||
162 | packet->payload = payload; | ||
163 | packet->payload_length = length; | ||
164 | |||
165 | goto common; | ||
166 | } | ||
167 | |||
161 | if (tcode > 0x10) { | 168 | if (tcode > 0x10) { |
162 | ext_tcode = tcode & ~0x10; | 169 | ext_tcode = tcode & ~0x10; |
163 | tcode = TCODE_LOCK_REQUEST; | 170 | tcode = TCODE_LOCK_REQUEST; |
@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | |||
204 | packet->payload_length = 0; | 211 | packet->payload_length = 0; |
205 | break; | 212 | break; |
206 | } | 213 | } |
207 | 214 | common: | |
208 | packet->speed = speed; | 215 | packet->speed = speed; |
209 | packet->generation = generation; | 216 | packet->generation = generation; |
210 | packet->ack = 0; | 217 | packet->ack = 0; |
@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, | |||
246 | * @param callback function to be called when the transaction is completed | 253 | * @param callback function to be called when the transaction is completed |
247 | * @param callback_data pointer to arbitrary data, which will be | 254 | * @param callback_data pointer to arbitrary data, which will be |
248 | * passed to the callback | 255 | * passed to the callback |
256 | * | ||
257 | * In case of asynchronous stream packets, i.e. TCODE_STREAM_DATA, the caller | ||
258 | * needs to synthesize @destination_id with fw_stream_packet_destination_id(). | ||
249 | */ | 259 | */ |
250 | void | 260 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, |
251 | fw_send_request(struct fw_card *card, struct fw_transaction *t, | 261 | int destination_id, int generation, int speed, |
252 | int tcode, int destination_id, int generation, int speed, | 262 | unsigned long long offset, void *payload, size_t length, |
253 | unsigned long long offset, | 263 | fw_transaction_callback_t callback, void *callback_data) |
254 | void *payload, size_t length, | ||
255 | fw_transaction_callback_t callback, void *callback_data) | ||
256 | { | 264 | { |
257 | unsigned long flags; | 265 | unsigned long flags; |
258 | int tlabel; | 266 | int tlabel; |
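The note above defers the encoding of @destination_id for stream packets to a helper that is not part of this hunk. A hedged sketch of what it presumably packs follows — tag, channel, and sy share the destination field of a 1394 stream packet header; check fw-transaction.h for the real definition before relying on this:

	/*
	 * Hypothetical illustration only:
	 *
	 *   static inline int fw_stream_packet_destination_id(int tag,
	 *					int channel, int sy)
	 *   {
	 *           return tag << 14 | channel << 8 | sy;
	 *   }
	 *
	 * A caller streaming on channel 35 with tag 1, sy 0 would then
	 * do roughly:
	 *
	 *   fw_send_request(card, &t, TCODE_STREAM_DATA,
	 *                   fw_stream_packet_destination_id(1, 35, 0),
	 *                   generation, speed, 0, payload, length,
	 *                   callback, callback_data);
	 */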
@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode, | |||
322 | * Returns the RCODE. | 330 | * Returns the RCODE. |
323 | */ | 331 | */ |
324 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | 332 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, |
325 | int generation, int speed, unsigned long long offset, | 333 | int generation, int speed, unsigned long long offset, |
326 | void *data, size_t length) | 334 | void *payload, size_t length) |
327 | { | 335 | { |
328 | struct transaction_callback_data d; | 336 | struct transaction_callback_data d; |
329 | struct fw_transaction t; | 337 | struct fw_transaction t; |
330 | 338 | ||
331 | init_completion(&d.done); | 339 | init_completion(&d.done); |
332 | d.payload = data; | 340 | d.payload = payload; |
333 | fw_send_request(card, &t, tcode, destination_id, generation, speed, | 341 | fw_send_request(card, &t, tcode, destination_id, generation, speed, |
334 | offset, data, length, transaction_callback, &d); | 342 | offset, payload, length, transaction_callback, &d); |
335 | wait_for_completion(&d.done); | 343 | wait_for_completion(&d.done); |
336 | 344 | ||
337 | return d.rcode; | 345 | return d.rcode; |
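fw_run_transaction() wraps fw_send_request() with a completion and sleeps until the response arrives, so it may only be used in process context. A hedged usage sketch, where card, node_id, and generation stand in for values normally taken from a struct fw_device:

	__be32 cycle_time;
	int rcode;

	/* blocking quadlet read of the CYCLE_TIME CSR of a remote node */
	rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, node_id,
				   generation, SCODE_100,
				   CSR_REGISTER_BASE | CSR_CYCLE_TIME,
				   &cycle_time, sizeof(cycle_time));
	if (rcode != RCODE_COMPLETE)
		fw_error("CYCLE_TIME read failed: rcode %d\n", rcode);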
@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card) | |||
399 | } | 407 | } |
400 | } | 408 | } |
401 | 409 | ||
402 | static struct fw_address_handler * | 410 | static struct fw_address_handler *lookup_overlapping_address_handler( |
403 | lookup_overlapping_address_handler(struct list_head *list, | 411 | struct list_head *list, unsigned long long offset, size_t length) |
404 | unsigned long long offset, size_t length) | ||
405 | { | 412 | { |
406 | struct fw_address_handler *handler; | 413 | struct fw_address_handler *handler; |
407 | 414 | ||
@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list, | |||
414 | return NULL; | 421 | return NULL; |
415 | } | 422 | } |
416 | 423 | ||
417 | static struct fw_address_handler * | 424 | static struct fw_address_handler *lookup_enclosing_address_handler( |
418 | lookup_enclosing_address_handler(struct list_head *list, | 425 | struct list_head *list, unsigned long long offset, size_t length) |
419 | unsigned long long offset, size_t length) | ||
420 | { | 426 | { |
421 | struct fw_address_handler *handler; | 427 | struct fw_address_handler *handler; |
422 | 428 | ||
@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region = | |||
449 | #endif /* 0 */ | 455 | #endif /* 0 */ |
450 | 456 | ||
451 | /** | 457 | /** |
452 | * Allocate a range of addresses in the node space of the OHCI | 458 | * fw_core_add_address_handler - register for incoming requests |
453 | * controller. When a request is received that falls within the | 459 | * @handler: callback |
454 | * specified address range, the specified callback is invoked. The | 460 | * @region: region in the IEEE 1212 node space address range |
455 | * parameters passed to the callback give the details of the | 461 | * |
456 | * particular request. | 462 | * region->start, ->end, and handler->length have to be quadlet-aligned. |
463 | * | ||
464 | * When a request is received that falls within the specified address range, | ||
465 | * the specified callback is invoked. The parameters passed to the callback | ||
466 | * give the details of the particular request. | ||
457 | * | 467 | * |
458 | * Return value: 0 on success, non-zero otherwise. | 468 | * Return value: 0 on success, non-zero otherwise. |
459 | * The start offset of the handler's address region is determined by | 469 | * The start offset of the handler's address region is determined by |
460 | * fw_core_add_address_handler() and is returned in handler->offset. | 470 | * fw_core_add_address_handler() and is returned in handler->offset. |
461 | * The offset is quadlet-aligned. | ||
462 | */ | 471 | */ |
463 | int | 472 | int fw_core_add_address_handler(struct fw_address_handler *handler, |
464 | fw_core_add_address_handler(struct fw_address_handler *handler, | 473 | const struct fw_address_region *region) |
465 | const struct fw_address_region *region) | ||
466 | { | 474 | { |
467 | struct fw_address_handler *other; | 475 | struct fw_address_handler *other; |
468 | unsigned long flags; | 476 | unsigned long flags; |
469 | int ret = -EBUSY; | 477 | int ret = -EBUSY; |
470 | 478 | ||
479 | if (region->start & 0xffff000000000003ULL || | ||
480 | region->end & 0xffff000000000003ULL || | ||
481 | region->start >= region->end || | ||
482 | handler->length & 3 || | ||
483 | handler->length == 0) | ||
484 | return -EINVAL; | ||
485 | |||
471 | spin_lock_irqsave(&address_handler_lock, flags); | 486 | spin_lock_irqsave(&address_handler_lock, flags); |
472 | 487 | ||
473 | handler->offset = roundup(region->start, 4); | 488 | handler->offset = region->start; |
474 | while (handler->offset + handler->length <= region->end) { | 489 | while (handler->offset + handler->length <= region->end) { |
475 | other = | 490 | other = |
476 | lookup_overlapping_address_handler(&address_handler_list, | 491 | lookup_overlapping_address_handler(&address_handler_list, |
477 | handler->offset, | 492 | handler->offset, |
478 | handler->length); | 493 | handler->length); |
479 | if (other != NULL) { | 494 | if (other != NULL) { |
480 | handler->offset = | 495 | handler->offset += other->length; |
481 | roundup(other->offset + other->length, 4); | ||
482 | } else { | 496 | } else { |
483 | list_add_tail(&handler->link, &address_handler_list); | 497 | list_add_tail(&handler->link, &address_handler_list); |
484 | ret = 0; | 498 | ret = 0; |
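With the new sanity checks, registration fails with -EINVAL unless both region bounds are quadlet-aligned 48-bit addresses and handler->length is a nonzero multiple of four. A sketch of a conforming caller (handle_fcp and the use of the FCP_RESPONSE register range are illustrative, not part of this patch):

	static void handle_fcp(struct fw_card *card, struct fw_request *request,
			       int tcode, int destination, int source,
			       int generation, int speed,
			       unsigned long long offset, void *payload,
			       size_t length, void *callback_data)
	{
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static const struct fw_address_region fcp_response_region = {
		.start	= CSR_REGISTER_BASE | 0xd00,	/* quadlet-aligned */
		.end	= CSR_REGISTER_BASE | 0xf00,
	};

	static struct fw_address_handler fcp_handler = {
		.length		  = 0x200,	/* nonzero multiple of 4 */
		.address_callback = handle_fcp,
	};

	static int example_register_fcp(void)
	{
		/* on success, the start address chosen by the core is
		 * returned in fcp_handler.offset */
		return fw_core_add_address_handler(&fcp_handler,
						   &fcp_response_region);
	}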
@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler, | |||
493 | EXPORT_SYMBOL(fw_core_add_address_handler); | 507 | EXPORT_SYMBOL(fw_core_add_address_handler); |
494 | 508 | ||
495 | /** | 509 | /** |
496 | * Deallocate a range of addresses allocated with fw_allocate. This | 510 | * fw_core_remove_address_handler - unregister an address handler |
497 | * will call the associated callback one last time with a the special | ||
498 | * tcode TCODE_DEALLOCATE, to let the client destroy the registered | ||
499 | * callback data. For convenience, the callback parameters offset and | ||
500 | * length are set to the start and the length respectively for the | ||
501 | * deallocated region, payload is set to NULL. | ||
502 | */ | 511 | */ |
503 | void fw_core_remove_address_handler(struct fw_address_handler *handler) | 512 | void fw_core_remove_address_handler(struct fw_address_handler *handler) |
504 | { | 513 | { |
@@ -518,9 +527,8 @@ struct fw_request { | |||
518 | u32 data[0]; | 527 | u32 data[0]; |
519 | }; | 528 | }; |
520 | 529 | ||
521 | static void | 530 | static void free_response_callback(struct fw_packet *packet, |
522 | free_response_callback(struct fw_packet *packet, | 531 | struct fw_card *card, int status) |
523 | struct fw_card *card, int status) | ||
524 | { | 532 | { |
525 | struct fw_request *request; | 533 | struct fw_request *request; |
526 | 534 | ||
@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet, | |||
528 | kfree(request); | 536 | kfree(request); |
529 | } | 537 | } |
530 | 538 | ||
531 | void | 539 | void fw_fill_response(struct fw_packet *response, u32 *request_header, |
532 | fw_fill_response(struct fw_packet *response, u32 *request_header, | 540 | int rcode, void *payload, size_t length) |
533 | int rcode, void *payload, size_t length) | ||
534 | { | 541 | { |
535 | int tcode, tlabel, extended_tcode, source, destination; | 542 | int tcode, tlabel, extended_tcode, source, destination; |
536 | 543 | ||
@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header, | |||
588 | } | 595 | } |
589 | EXPORT_SYMBOL(fw_fill_response); | 596 | EXPORT_SYMBOL(fw_fill_response); |
590 | 597 | ||
591 | static struct fw_request * | 598 | static struct fw_request *allocate_request(struct fw_packet *p) |
592 | allocate_request(struct fw_packet *p) | ||
593 | { | 599 | { |
594 | struct fw_request *request; | 600 | struct fw_request *request; |
595 | u32 *data, length; | 601 | u32 *data, length; |
@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p) | |||
649 | return request; | 655 | return request; |
650 | } | 656 | } |
651 | 657 | ||
652 | void | 658 | void fw_send_response(struct fw_card *card, |
653 | fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) | 659 | struct fw_request *request, int rcode) |
654 | { | 660 | { |
655 | /* unified transaction or broadcast transaction: don't respond */ | 661 | /* unified transaction or broadcast transaction: don't respond */ |
656 | if (request->ack != ACK_PENDING || | 662 | if (request->ack != ACK_PENDING || |
@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) | |||
670 | } | 676 | } |
671 | EXPORT_SYMBOL(fw_send_response); | 677 | EXPORT_SYMBOL(fw_send_response); |
672 | 678 | ||
673 | void | 679 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) |
674 | fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | ||
675 | { | 680 | { |
676 | struct fw_address_handler *handler; | 681 | struct fw_address_handler *handler; |
677 | struct fw_request *request; | 682 | struct fw_request *request; |
@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p) | |||
719 | } | 724 | } |
720 | EXPORT_SYMBOL(fw_core_handle_request); | 725 | EXPORT_SYMBOL(fw_core_handle_request); |
721 | 726 | ||
722 | void | 727 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) |
723 | fw_core_handle_response(struct fw_card *card, struct fw_packet *p) | ||
724 | { | 728 | { |
725 | struct fw_transaction *t; | 729 | struct fw_transaction *t; |
726 | unsigned long flags; | 730 | unsigned long flags; |
@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region = | |||
793 | { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, | 797 | { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, |
794 | .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; | 798 | .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; |
795 | 799 | ||
796 | static void | 800 | static void handle_topology_map(struct fw_card *card, struct fw_request *request, |
797 | handle_topology_map(struct fw_card *card, struct fw_request *request, | 801 | int tcode, int destination, int source, int generation, |
798 | int tcode, int destination, int source, | 802 | int speed, unsigned long long offset, |
799 | int generation, int speed, | 803 | void *payload, size_t length, void *callback_data) |
800 | unsigned long long offset, | ||
801 | void *payload, size_t length, void *callback_data) | ||
802 | { | 804 | { |
803 | int i, start, end; | 805 | int i, start, end; |
804 | __be32 *map; | 806 | __be32 *map; |
@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region = | |||
832 | { .start = CSR_REGISTER_BASE, | 834 | { .start = CSR_REGISTER_BASE, |
833 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; | 835 | .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; |
834 | 836 | ||
835 | static void | 837 | static void handle_registers(struct fw_card *card, struct fw_request *request, |
836 | handle_registers(struct fw_card *card, struct fw_request *request, | 838 | int tcode, int destination, int source, int generation, |
837 | int tcode, int destination, int source, | 839 | int speed, unsigned long long offset, |
838 | int generation, int speed, | 840 | void *payload, size_t length, void *callback_data) |
839 | unsigned long long offset, | ||
840 | void *payload, size_t length, void *callback_data) | ||
841 | { | 841 | { |
842 | int reg = offset & ~CSR_REGISTER_BASE; | 842 | int reg = offset & ~CSR_REGISTER_BASE; |
843 | unsigned long long bus_time; | 843 | unsigned long long bus_time; |
@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = { | |||
939 | 939 | ||
940 | static int __init fw_core_init(void) | 940 | static int __init fw_core_init(void) |
941 | { | 941 | { |
942 | int retval; | 942 | int ret; |
943 | 943 | ||
944 | retval = bus_register(&fw_bus_type); | 944 | ret = bus_register(&fw_bus_type); |
945 | if (retval < 0) | 945 | if (ret < 0) |
946 | return retval; | 946 | return ret; |
947 | 947 | ||
948 | fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); | 948 | fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); |
949 | if (fw_cdev_major < 0) { | 949 | if (fw_cdev_major < 0) { |
@@ -951,19 +951,10 @@ static int __init fw_core_init(void) | |||
951 | return fw_cdev_major; | 951 | return fw_cdev_major; |
952 | } | 952 | } |
953 | 953 | ||
954 | retval = fw_core_add_address_handler(&topology_map, | 954 | fw_core_add_address_handler(&topology_map, &topology_map_region); |
955 | &topology_map_region); | 955 | fw_core_add_address_handler(®isters, ®isters_region); |
956 | BUG_ON(retval < 0); | 956 | fw_core_add_descriptor(&vendor_id_descriptor); |
957 | 957 | fw_core_add_descriptor(&model_id_descriptor); | |
958 | retval = fw_core_add_address_handler(®isters, | ||
959 | ®isters_region); | ||
960 | BUG_ON(retval < 0); | ||
961 | |||
962 | /* Add the vendor textual descriptor. */ | ||
963 | retval = fw_core_add_descriptor(&vendor_id_descriptor); | ||
964 | BUG_ON(retval < 0); | ||
965 | retval = fw_core_add_descriptor(&model_id_descriptor); | ||
966 | BUG_ON(retval < 0); | ||
967 | 958 | ||
968 | return 0; | 959 | return 0; |
969 | } | 960 | } |
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 1d78e9cc5940..dfa799068f89 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -82,14 +82,14 @@ | |||
82 | #define CSR_SPEED_MAP 0x2000 | 82 | #define CSR_SPEED_MAP 0x2000 |
83 | #define CSR_SPEED_MAP_END 0x3000 | 83 | #define CSR_SPEED_MAP_END 0x3000 |
84 | 84 | ||
85 | #define BANDWIDTH_AVAILABLE_INITIAL 4915 | ||
85 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) | 86 | #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) |
86 | #define BROADCAST_CHANNEL_VALID (1 << 30) | 87 | #define BROADCAST_CHANNEL_VALID (1 << 30) |
87 | 88 | ||
88 | #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) | 89 | #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) |
89 | #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) | 90 | #define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) |
90 | 91 | ||
91 | static inline void | 92 | static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size) |
92 | fw_memcpy_from_be32(void *_dst, void *_src, size_t size) | ||
93 | { | 93 | { |
94 | u32 *dst = _dst; | 94 | u32 *dst = _dst; |
95 | __be32 *src = _src; | 95 | __be32 *src = _src; |
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size) | |||
99 | dst[i] = be32_to_cpu(src[i]); | 99 | dst[i] = be32_to_cpu(src[i]); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void | 102 | static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size) |
103 | fw_memcpy_to_be32(void *_dst, void *_src, size_t size) | ||
104 | { | 103 | { |
105 | fw_memcpy_from_be32(_dst, _src, size); | 104 | fw_memcpy_from_be32(_dst, _src, size); |
106 | } | 105 | } |
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet, | |||
125 | struct fw_card *card, int status); | 124 | struct fw_card *card, int status); |
126 | 125 | ||
127 | typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, | 126 | typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, |
128 | void *data, | 127 | void *data, size_t length, |
129 | size_t length, | ||
130 | void *callback_data); | 128 | void *callback_data); |
131 | 129 | ||
132 | /* | 130 | /* |
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card, | |||
141 | void *data, size_t length, | 139 | void *data, size_t length, |
142 | void *callback_data); | 140 | void *callback_data); |
143 | 141 | ||
144 | typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle, | ||
145 | int node_id, int generation, | ||
146 | u32 *self_ids, | ||
147 | int self_id_count, | ||
148 | void *callback_data); | ||
149 | |||
150 | struct fw_packet { | 142 | struct fw_packet { |
151 | int speed; | 143 | int speed; |
152 | int generation; | 144 | int generation; |
@@ -187,12 +179,6 @@ struct fw_transaction { | |||
187 | void *callback_data; | 179 | void *callback_data; |
188 | }; | 180 | }; |
189 | 181 | ||
190 | static inline struct fw_packet * | ||
191 | fw_packet(struct list_head *l) | ||
192 | { | ||
193 | return list_entry(l, struct fw_packet, link); | ||
194 | } | ||
195 | |||
196 | struct fw_address_handler { | 182 | struct fw_address_handler { |
197 | u64 offset; | 183 | u64 offset; |
198 | size_t length; | 184 | size_t length; |
@@ -201,7 +187,6 @@ struct fw_address_handler { | |||
201 | struct list_head link; | 187 | struct list_head link; |
202 | }; | 188 | }; |
203 | 189 | ||
204 | |||
205 | struct fw_address_region { | 190 | struct fw_address_region { |
206 | u64 start; | 191 | u64 start; |
207 | u64 end; | 192 | u64 end; |
@@ -255,6 +240,7 @@ struct fw_card { | |||
255 | int bm_retries; | 240 | int bm_retries; |
256 | int bm_generation; | 241 | int bm_generation; |
257 | 242 | ||
243 | bool broadcast_channel_allocated; | ||
258 | u32 broadcast_channel; | 244 | u32 broadcast_channel; |
259 | u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; | 245 | u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; |
260 | }; | 246 | }; |
@@ -315,10 +301,8 @@ struct fw_iso_packet { | |||
315 | struct fw_iso_context; | 301 | struct fw_iso_context; |
316 | 302 | ||
317 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | 303 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, |
318 | u32 cycle, | 304 | u32 cycle, size_t header_length, |
319 | size_t header_length, | 305 | void *header, void *data); |
320 | void *header, | ||
321 | void *data); | ||
322 | 306 | ||
323 | /* | 307 | /* |
324 | * An iso buffer is just a set of pages mapped for DMA in the | 308 | * An iso buffer is just a set of pages mapped for DMA in the |
@@ -344,36 +328,25 @@ struct fw_iso_context { | |||
344 | void *callback_data; | 328 | void *callback_data; |
345 | }; | 329 | }; |
346 | 330 | ||
347 | int | 331 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
348 | fw_iso_buffer_init(struct fw_iso_buffer *buffer, | 332 | int page_count, enum dma_data_direction direction); |
349 | struct fw_card *card, | 333 | int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); |
350 | int page_count, | 334 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); |
351 | enum dma_data_direction direction); | 335 | |
352 | int | 336 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
353 | fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); | 337 | int type, int channel, int speed, size_t header_size, |
354 | void | 338 | fw_iso_callback_t callback, void *callback_data); |
355 | fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | 339 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
356 | 340 | struct fw_iso_packet *packet, | |
357 | struct fw_iso_context * | 341 | struct fw_iso_buffer *buffer, |
358 | fw_iso_context_create(struct fw_card *card, int type, | 342 | unsigned long payload); |
359 | int channel, int speed, size_t header_size, | 343 | int fw_iso_context_start(struct fw_iso_context *ctx, |
360 | fw_iso_callback_t callback, void *callback_data); | 344 | int cycle, int sync, int tags); |
361 | 345 | int fw_iso_context_stop(struct fw_iso_context *ctx); | |
362 | void | 346 | void fw_iso_context_destroy(struct fw_iso_context *ctx); |
363 | fw_iso_context_destroy(struct fw_iso_context *ctx); | 347 | |
364 | 348 | void fw_iso_resource_manage(struct fw_card *card, int generation, | |
365 | int | 349 | u64 channels_mask, int *channel, int *bandwidth, bool allocate); |
366 | fw_iso_context_queue(struct fw_iso_context *ctx, | ||
367 | struct fw_iso_packet *packet, | ||
368 | struct fw_iso_buffer *buffer, | ||
369 | unsigned long payload); | ||
370 | |||
371 | int | ||
372 | fw_iso_context_start(struct fw_iso_context *ctx, | ||
373 | int cycle, int sync, int tags); | ||
374 | |||
375 | int | ||
376 | fw_iso_context_stop(struct fw_iso_context *ctx); | ||
377 | 350 | ||
378 | struct fw_card_driver { | 351 | struct fw_card_driver { |
379 | /* | 352 | /* |
@@ -415,7 +388,7 @@ struct fw_card_driver { | |||
415 | 388 | ||
416 | struct fw_iso_context * | 389 | struct fw_iso_context * |
417 | (*allocate_iso_context)(struct fw_card *card, | 390 | (*allocate_iso_context)(struct fw_card *card, |
418 | int type, size_t header_size); | 391 | int type, int channel, size_t header_size); |
419 | void (*free_iso_context)(struct fw_iso_context *ctx); | 392 | void (*free_iso_context)(struct fw_iso_context *ctx); |
420 | 393 | ||
421 | int (*start_iso)(struct fw_iso_context *ctx, | 394 | int (*start_iso)(struct fw_iso_context *ctx, |
@@ -429,54 +402,45 @@ struct fw_card_driver { | |||
429 | int (*stop_iso)(struct fw_iso_context *ctx); | 402 | int (*stop_iso)(struct fw_iso_context *ctx); |
430 | }; | 403 | }; |
431 | 404 | ||
432 | int | 405 | int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); |
433 | fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); | ||
434 | 406 | ||
435 | void | 407 | void fw_send_request(struct fw_card *card, struct fw_transaction *t, |
436 | fw_send_request(struct fw_card *card, struct fw_transaction *t, | ||
437 | int tcode, int destination_id, int generation, int speed, | 408 | int tcode, int destination_id, int generation, int speed, |
438 | unsigned long long offset, void *data, size_t length, | 409 | unsigned long long offset, void *payload, size_t length, |
439 | fw_transaction_callback_t callback, void *callback_data); | 410 | fw_transaction_callback_t callback, void *callback_data); |
440 | |||
441 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | ||
442 | int generation, int speed, unsigned long long offset, | ||
443 | void *data, size_t length); | ||
444 | |||
445 | int fw_cancel_transaction(struct fw_card *card, | 411 | int fw_cancel_transaction(struct fw_card *card, |
446 | struct fw_transaction *transaction); | 412 | struct fw_transaction *transaction); |
447 | |||
448 | void fw_flush_transactions(struct fw_card *card); | 413 | void fw_flush_transactions(struct fw_card *card); |
449 | 414 | int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, | |
415 | int generation, int speed, unsigned long long offset, | ||
416 | void *payload, size_t length); | ||
450 | void fw_send_phy_config(struct fw_card *card, | 417 | void fw_send_phy_config(struct fw_card *card, |
451 | int node_id, int generation, int gap_count); | 418 | int node_id, int generation, int gap_count); |
452 | 419 | ||
420 | static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) | ||
421 | { | ||
422 | return tag << 14 | channel << 8 | sy; | ||
423 | } | ||
424 | |||
453 | /* | 425 | /* |
454 | * Called by the topology code to inform the device code of node | 426 | * Called by the topology code to inform the device code of node |
455 | * activity; found, lost, or updated nodes. | 427 | * activity; found, lost, or updated nodes. |
456 | */ | 428 | */ |
457 | void | 429 | void fw_node_event(struct fw_card *card, struct fw_node *node, int event); |
458 | fw_node_event(struct fw_card *card, struct fw_node *node, int event); | ||
459 | 430 | ||
460 | /* API used by card level drivers */ | 431 | /* API used by card level drivers */ |
461 | 432 | ||
462 | void | 433 | void fw_card_initialize(struct fw_card *card, |
463 | fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | 434 | const struct fw_card_driver *driver, struct device *device); |
464 | struct device *device); | 435 | int fw_card_add(struct fw_card *card, |
465 | int | 436 | u32 max_receive, u32 link_speed, u64 guid); |
466 | fw_card_add(struct fw_card *card, | 437 | void fw_core_remove_card(struct fw_card *card); |
467 | u32 max_receive, u32 link_speed, u64 guid); | 438 | void fw_core_handle_bus_reset(struct fw_card *card, int node_id, |
468 | 439 | int generation, int self_id_count, u32 *self_ids); | |
469 | void | 440 | void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); |
470 | fw_core_remove_card(struct fw_card *card); | 441 | void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); |
471 | 442 | ||
472 | void | 443 | extern int fw_irm_set_broadcast_channel_register(struct device *dev, |
473 | fw_core_handle_bus_reset(struct fw_card *card, | 444 | void *data); |
474 | int node_id, int generation, | ||
475 | int self_id_count, u32 *self_ids); | ||
476 | void | ||
477 | fw_core_handle_request(struct fw_card *card, struct fw_packet *request); | ||
478 | |||
479 | void | ||
480 | fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); | ||
481 | 445 | ||
482 | #endif /* __fw_transaction_h */ | 446 | #endif /* __fw_transaction_h */ |
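fw_stream_packet_destination_id() above is pure bit packing, matching the destination field that fw_fill_request() ORs into header[0] of a stream packet. A worked example:

	/*
	 * tag 3, channel 31, sy 5:
	 *   3 << 14 = 0xc000,  31 << 8 = 0x1f00,  sy = 0x0005
	 *   => 0xc000 | 0x1f00 | 0x0005 = 0xdf05
	 */
	int dest = fw_stream_packet_destination_id(3, 31, 5);	/* 0xdf05 */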
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 30022c4a5c12..4ec5061fa584 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
10 | drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ | 10 | drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ |
11 | drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ | 11 | drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ |
12 | drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ | 12 | drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ |
13 | drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o | 13 | drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \ |
14 | drm_info.o drm_debugfs.o | ||
14 | 15 | ||
15 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
16 | 17 | ||
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c new file mode 100644 index 000000000000..c77c6c6d9d2c --- /dev/null +++ b/drivers/gpu/drm/drm_debugfs.c | |||
@@ -0,0 +1,235 @@ | |||
1 | /** | ||
2 | * \file drm_debugfs.c | ||
3 | * debugfs support for DRM | ||
4 | * | ||
5 | * \author Ben Gamari <bgamari@gmail.com> | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com | ||
10 | * | ||
11 | * Copyright 2008 Ben Gamari <bgamari@gmail.com> | ||
12 | * | ||
13 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
14 | * copy of this software and associated documentation files (the "Software"), | ||
15 | * to deal in the Software without restriction, including without limitation | ||
16 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
17 | * and/or sell copies of the Software, and to permit persons to whom the | ||
18 | * Software is furnished to do so, subject to the following conditions: | ||
19 | * | ||
20 | * The above copyright notice and this permission notice (including the next | ||
21 | * paragraph) shall be included in all copies or substantial portions of the | ||
22 | * Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
27 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
28 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
29 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
30 | * OTHER DEALINGS IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/debugfs.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | #include "drmP.h" | ||
36 | |||
37 | #if defined(CONFIG_DEBUG_FS) | ||
38 | |||
39 | /*************************************************** | ||
40 | * Initialization, etc. | ||
41 | **************************************************/ | ||
42 | |||
43 | static struct drm_info_list drm_debugfs_list[] = { | ||
44 | {"name", drm_name_info, 0}, | ||
45 | {"vm", drm_vm_info, 0}, | ||
46 | {"clients", drm_clients_info, 0}, | ||
47 | {"queues", drm_queues_info, 0}, | ||
48 | {"bufs", drm_bufs_info, 0}, | ||
49 | {"gem_names", drm_gem_name_info, DRIVER_GEM}, | ||
50 | {"gem_objects", drm_gem_object_info, DRIVER_GEM}, | ||
51 | #if DRM_DEBUG_CODE | ||
52 | {"vma", drm_vma_info, 0}, | ||
53 | #endif | ||
54 | }; | ||
55 | #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) | ||
56 | |||
57 | |||
58 | static int drm_debugfs_open(struct inode *inode, struct file *file) | ||
59 | { | ||
60 | struct drm_info_node *node = inode->i_private; | ||
61 | |||
62 | return single_open(file, node->info_ent->show, node); | ||
63 | } | ||
64 | |||
65 | |||
66 | static const struct file_operations drm_debugfs_fops = { | ||
67 | .owner = THIS_MODULE, | ||
68 | .open = drm_debugfs_open, | ||
69 | .read = seq_read, | ||
70 | .llseek = seq_lseek, | ||
71 | .release = single_release, | ||
72 | }; | ||
73 | |||
74 | |||
75 | /** | ||
76 | * Initialize a given set of debugfs files for a device | ||
77 | * | ||
78 | * \param files The array of files to create | ||
79 | * \param count The number of files given | ||
80 | * \param root DRI debugfs dir entry. | ||
81 | * \param minor device minor number | ||
82 | * \return Zero on success, non-zero on failure | ||
83 | * | ||
84 | * Create a given set of debugfs files represented by an array of | ||
85 | * drm_info_list entries in the given root directory. | ||
86 | */ | ||
87 | int drm_debugfs_create_files(struct drm_info_list *files, int count, | ||
88 | struct dentry *root, struct drm_minor *minor) | ||
89 | { | ||
90 | struct drm_device *dev = minor->dev; | ||
91 | struct dentry *ent; | ||
92 | struct drm_info_node *tmp; | ||
93 | char name[64]; | ||
94 | int i, ret; | ||
95 | sprintf(name, "%d", minor->index); | ||
96 | for (i = 0; i < count; i++) { | ||
97 | u32 features = files[i].driver_features; | ||
98 | |||
99 | if (features != 0 && | ||
100 | (dev->driver->driver_features & features) != features) | ||
101 | continue; | ||
102 | |||
103 | tmp = drm_alloc(sizeof(struct drm_info_node), | ||
104 | _DRM_DRIVER); | ||
105 | ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, | ||
106 | root, tmp, &drm_debugfs_fops); | ||
107 | if (!ent) { | ||
108 | DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n", | ||
109 | name, files[i].name); | ||
110 | drm_free(tmp, sizeof(struct drm_info_node), | ||
111 | _DRM_DRIVER); | ||
112 | ret = -1; | ||
113 | goto fail; | ||
114 | } | ||
115 | |||
116 | tmp->minor = minor; | ||
117 | tmp->dent = ent; | ||
118 | tmp->info_ent = &files[i]; | ||
119 | list_add(&(tmp->list), &(minor->debugfs_nodes.list)); | ||
120 | } | ||
121 | return 0; | ||
122 | |||
123 | fail: | ||
124 | drm_debugfs_remove_files(files, count, minor); | ||
125 | return ret; | ||
126 | } | ||
127 | EXPORT_SYMBOL(drm_debugfs_create_files); | ||
128 | |||
129 | /** | ||
130 | * Initialize the DRI debugfs filesystem for a device | ||
131 | * | ||
132 | * \param dev DRM device | ||
133 | * \param minor device minor number | ||
134 | * \param root DRI debugfs dir entry. | ||
135 | * | ||
136 | * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry | ||
137 | * "/debugfs/dri/%minor%/", and each entry in debugfs_list as | ||
138 | * "/debugfs/dri/%minor%/%name%". | ||
139 | */ | ||
140 | int drm_debugfs_init(struct drm_minor *minor, int minor_id, | ||
141 | struct dentry *root) | ||
142 | { | ||
143 | struct drm_device *dev = minor->dev; | ||
144 | char name[64]; | ||
145 | int ret; | ||
146 | |||
147 | INIT_LIST_HEAD(&minor->debugfs_nodes.list); | ||
148 | sprintf(name, "%d", minor_id); | ||
149 | minor->debugfs_root = debugfs_create_dir(name, root); | ||
150 | if (!minor->debugfs_root) { | ||
151 | DRM_ERROR("Cannot create /debugfs/dri/%s\n", name); | ||
152 | return -1; | ||
153 | } | ||
154 | |||
155 | ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, | ||
156 | minor->debugfs_root, minor); | ||
157 | if (ret) { | ||
158 | debugfs_remove(minor->debugfs_root); | ||
159 | minor->debugfs_root = NULL; | ||
160 | DRM_ERROR("Failed to create core drm debugfs files\n"); | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | if (dev->driver->debugfs_init) { | ||
165 | ret = dev->driver->debugfs_init(minor); | ||
166 | if (ret) { | ||
167 | DRM_ERROR("DRM: Driver failed to initialize " | ||
168 | "/debugfs/dri.\n"); | ||
169 | return ret; | ||
170 | } | ||
171 | } | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | |||
176 | /** | ||
177 | * Remove a list of debugfs files | ||
178 | * | ||
179 | * \param files The list of files | ||
180 | * \param count The number of files | ||
181 | * \param minor The minor of which we should remove the files | ||
182 | * \return always zero. | ||
183 | * | ||
184 | * Remove all debugfs entries created by debugfs_init(). | ||
185 | */ | ||
186 | int drm_debugfs_remove_files(struct drm_info_list *files, int count, | ||
187 | struct drm_minor *minor) | ||
188 | { | ||
189 | struct list_head *pos, *q; | ||
190 | struct drm_info_node *tmp; | ||
191 | int i; | ||
192 | |||
193 | for (i = 0; i < count; i++) { | ||
194 | list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { | ||
195 | tmp = list_entry(pos, struct drm_info_node, list); | ||
196 | if (tmp->info_ent == &files[i]) { | ||
197 | debugfs_remove(tmp->dent); | ||
198 | list_del(pos); | ||
199 | drm_free(tmp, sizeof(struct drm_info_node), | ||
200 | _DRM_DRIVER); | ||
201 | } | ||
202 | } | ||
203 | } | ||
204 | return 0; | ||
205 | } | ||
206 | EXPORT_SYMBOL(drm_debugfs_remove_files); | ||
207 | |||
208 | /** | ||
209 | * Cleanup the debugfs filesystem resources. | ||
210 | * | ||
211 | * \param minor device minor number. | ||
212 | * \return always zero. | ||
213 | * | ||
214 | * Remove all debugfs entries created by debugfs_init(). | ||
215 | */ | ||
216 | int drm_debugfs_cleanup(struct drm_minor *minor) | ||
217 | { | ||
218 | struct drm_device *dev = minor->dev; | ||
219 | |||
220 | if (!minor->debugfs_root) | ||
221 | return 0; | ||
222 | |||
223 | if (dev->driver->debugfs_cleanup) | ||
224 | dev->driver->debugfs_cleanup(minor); | ||
225 | |||
226 | drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor); | ||
227 | |||
228 | debugfs_remove(minor->debugfs_root); | ||
229 | minor->debugfs_root = NULL; | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | #endif /* CONFIG_DEBUG_FS */ | ||
235 | |||
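Drivers opt in through the debugfs_init/debugfs_cleanup hooks that drm_debugfs_init() and drm_debugfs_cleanup() invoke above. A sketch with hypothetical foo_* names (not taken from this patch):

	static int foo_ring_info(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = m->private;

		seq_printf(m, "minor %d: one ring\n", node->minor->index);
		return 0;
	}

	static struct drm_info_list foo_debugfs_list[] = {
		{"foo_ring", foo_ring_info, 0},
	};

	static int foo_debugfs_init(struct drm_minor *minor)
	{
		return drm_debugfs_create_files(foo_debugfs_list,
						ARRAY_SIZE(foo_debugfs_list),
						minor->debugfs_root, minor);
	}

	static void foo_debugfs_cleanup(struct drm_minor *minor)
	{
		drm_debugfs_remove_files(foo_debugfs_list,
					 ARRAY_SIZE(foo_debugfs_list), minor);
	}

These two functions would then be assigned to the .debugfs_init and .debugfs_cleanup members of the driver's struct drm_driver.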
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 14c7a23dc157..ed32edb17166 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -46,9 +46,11 @@ | |||
46 | * OTHER DEALINGS IN THE SOFTWARE. | 46 | * OTHER DEALINGS IN THE SOFTWARE. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | #include <linux/debugfs.h> | ||
49 | #include "drmP.h" | 50 | #include "drmP.h" |
50 | #include "drm_core.h" | 51 | #include "drm_core.h" |
51 | 52 | ||
53 | |||
52 | static int drm_version(struct drm_device *dev, void *data, | 54 | static int drm_version(struct drm_device *dev, void *data, |
53 | struct drm_file *file_priv); | 55 | struct drm_file *file_priv); |
54 | 56 | ||
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev) | |||
178 | 180 | ||
179 | /* Clear AGP information */ | 181 | /* Clear AGP information */ |
180 | if (drm_core_has_AGP(dev) && dev->agp && | 182 | if (drm_core_has_AGP(dev) && dev->agp && |
181 | !drm_core_check_feature(dev, DRIVER_MODESET)) { | 183 | !drm_core_check_feature(dev, DRIVER_MODESET)) { |
182 | struct drm_agp_mem *entry, *tempe; | 184 | struct drm_agp_mem *entry, *tempe; |
183 | 185 | ||
184 | /* Remove AGP resources, but leave dev->agp | 186 | /* Remove AGP resources, but leave dev->agp |
@@ -382,6 +384,13 @@ static int __init drm_core_init(void) | |||
382 | goto err_p3; | 384 | goto err_p3; |
383 | } | 385 | } |
384 | 386 | ||
387 | drm_debugfs_root = debugfs_create_dir("dri", NULL); | ||
388 | if (!drm_debugfs_root) { | ||
389 | DRM_ERROR("Cannot create /debugfs/dri\n"); | ||
390 | ret = -1; | ||
391 | goto err_p3; | ||
392 | } | ||
393 | |||
385 | drm_mem_init(); | 394 | drm_mem_init(); |
386 | 395 | ||
387 | DRM_INFO("Initialized %s %d.%d.%d %s\n", | 396 | DRM_INFO("Initialized %s %d.%d.%d %s\n", |
@@ -400,6 +409,7 @@ err_p1: | |||
400 | static void __exit drm_core_exit(void) | 409 | static void __exit drm_core_exit(void) |
401 | { | 410 | { |
402 | remove_proc_entry("dri", NULL); | 411 | remove_proc_entry("dri", NULL); |
412 | debugfs_remove(drm_debugfs_root); | ||
403 | drm_sysfs_destroy(); | 413 | drm_sysfs_destroy(); |
404 | 414 | ||
405 | unregister_chrdev(DRM_MAJOR, "drm"); | 415 | unregister_chrdev(DRM_MAJOR, "drm"); |
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c new file mode 100644 index 000000000000..fc98952b9033 --- /dev/null +++ b/drivers/gpu/drm/drm_info.c | |||
@@ -0,0 +1,328 @@ | |||
1 | /** | ||
2 | * \file drm_info.c | ||
3 | * DRM info file implementations | ||
4 | * | ||
5 | * \author Ben Gamari <bgamari@gmail.com> | ||
6 | */ | ||
7 | |||
8 | /* | ||
9 | * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com | ||
10 | * | ||
11 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. | ||
12 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. | ||
13 | * Copyright 2008 Ben Gamari <bgamari@gmail.com> | ||
14 | * All Rights Reserved. | ||
15 | * | ||
16 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
17 | * copy of this software and associated documentation files (the "Software"), | ||
18 | * to deal in the Software without restriction, including without limitation | ||
19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
20 | * and/or sell copies of the Software, and to permit persons to whom the | ||
21 | * Software is furnished to do so, subject to the following conditions: | ||
22 | * | ||
23 | * The above copyright notice and this permission notice (including the next | ||
24 | * paragraph) shall be included in all copies or substantial portions of the | ||
25 | * Software. | ||
26 | * | ||
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
33 | * OTHER DEALINGS IN THE SOFTWARE. | ||
34 | */ | ||
35 | |||
36 | #include <linux/seq_file.h> | ||
37 | #include "drmP.h" | ||
38 | |||
39 | /** | ||
40 | * Called when "/proc/dri/.../name" is read. | ||
41 | * | ||
42 | * Prints the device name together with the bus id if available. | ||
43 | */ | ||
44 | int drm_name_info(struct seq_file *m, void *data) | ||
45 | { | ||
46 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
47 | struct drm_minor *minor = node->minor; | ||
48 | struct drm_device *dev = minor->dev; | ||
49 | struct drm_master *master = minor->master; | ||
50 | |||
51 | if (!master) | ||
52 | return 0; | ||
53 | |||
54 | if (master->unique) { | ||
55 | seq_printf(m, "%s %s %s\n", | ||
56 | dev->driver->pci_driver.name, | ||
57 | pci_name(dev->pdev), master->unique); | ||
58 | } else { | ||
59 | seq_printf(m, "%s %s\n", dev->driver->pci_driver.name, | ||
60 | pci_name(dev->pdev)); | ||
61 | } | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * Called when "/proc/dri/.../vm" is read. | ||
68 | * | ||
69 | * Prints information about all mappings in drm_device::maplist. | ||
70 | */ | ||
71 | int drm_vm_info(struct seq_file *m, void *data) | ||
72 | { | ||
73 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
74 | struct drm_device *dev = node->minor->dev; | ||
75 | struct drm_map *map; | ||
76 | struct drm_map_list *r_list; | ||
77 | |||
78 | /* Hardcoded from _DRM_FRAME_BUFFER, | ||
79 | _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and | ||
80 | _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ | ||
81 | const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; | ||
82 | const char *type; | ||
83 | int i; | ||
84 | |||
85 | mutex_lock(&dev->struct_mutex); | ||
86 | seq_printf(m, "slot offset size type flags address mtrr\n\n"); | ||
87 | i = 0; | ||
88 | list_for_each_entry(r_list, &dev->maplist, head) { | ||
89 | map = r_list->map; | ||
90 | if (!map) | ||
91 | continue; | ||
92 | if (map->type < 0 || map->type > 5) | ||
93 | type = "??"; | ||
94 | else | ||
95 | type = types[map->type]; | ||
96 | |||
97 | seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", | ||
98 | i, | ||
99 | map->offset, | ||
100 | map->size, type, map->flags, | ||
101 | (unsigned long) r_list->user_token); | ||
102 | if (map->mtrr < 0) | ||
103 | seq_printf(m, "none\n"); | ||
104 | else | ||
105 | seq_printf(m, "%4d\n", map->mtrr); | ||
106 | i++; | ||
107 | } | ||
108 | mutex_unlock(&dev->struct_mutex); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * Called when "/proc/dri/.../queues" is read. | ||
114 | */ | ||
115 | int drm_queues_info(struct seq_file *m, void *data) | ||
116 | { | ||
117 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
118 | struct drm_device *dev = node->minor->dev; | ||
119 | int i; | ||
120 | struct drm_queue *q; | ||
121 | |||
122 | mutex_lock(&dev->struct_mutex); | ||
123 | seq_printf(m, " ctx/flags use fin" | ||
124 | " blk/rw/rwf wait flushed queued" | ||
125 | " locks\n\n"); | ||
126 | for (i = 0; i < dev->queue_count; i++) { | ||
127 | q = dev->queuelist[i]; | ||
128 | atomic_inc(&q->use_count); | ||
129 | seq_printf(m, "%5d/0x%03x %5d %5d" | ||
130 | " %5d/%c%c/%c%c%c %5Zd\n", | ||
131 | i, | ||
132 | q->flags, | ||
133 | atomic_read(&q->use_count), | ||
134 | atomic_read(&q->finalization), | ||
135 | atomic_read(&q->block_count), | ||
136 | atomic_read(&q->block_read) ? 'r' : '-', | ||
137 | atomic_read(&q->block_write) ? 'w' : '-', | ||
138 | waitqueue_active(&q->read_queue) ? 'r' : '-', | ||
139 | waitqueue_active(&q->write_queue) ? 'w' : '-', | ||
140 | waitqueue_active(&q->flush_queue) ? 'f' : '-', | ||
141 | DRM_BUFCOUNT(&q->waitlist)); | ||
142 | atomic_dec(&q->use_count); | ||
143 | } | ||
144 | mutex_unlock(&dev->struct_mutex); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /** | ||
149 | * Called when "/proc/dri/.../bufs" is read. | ||
150 | */ | ||
151 | int drm_bufs_info(struct seq_file *m, void *data) | ||
152 | { | ||
153 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
154 | struct drm_device *dev = node->minor->dev; | ||
155 | struct drm_device_dma *dma; | ||
156 | int i, seg_pages; | ||
157 | |||
158 | mutex_lock(&dev->struct_mutex); | ||
159 | dma = dev->dma; | ||
160 | if (!dma) { | ||
161 | mutex_unlock(&dev->struct_mutex); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | seq_printf(m, " o size count free segs pages kB\n\n"); | ||
166 | for (i = 0; i <= DRM_MAX_ORDER; i++) { | ||
167 | if (dma->bufs[i].buf_count) { | ||
168 | seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order); | ||
169 | seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n", | ||
170 | i, | ||
171 | dma->bufs[i].buf_size, | ||
172 | dma->bufs[i].buf_count, | ||
173 | atomic_read(&dma->bufs[i].freelist.count), | ||
174 | dma->bufs[i].seg_count, | ||
175 | seg_pages, | ||
176 | seg_pages * PAGE_SIZE / 1024); | ||
177 | } | ||
178 | } | ||
179 | seq_printf(m, "\n"); | ||
180 | for (i = 0; i < dma->buf_count; i++) { | ||
181 | if (i && !(i % 32)) | ||
182 | seq_printf(m, "\n"); | ||
183 | seq_printf(m, " %d", dma->buflist[i]->list); | ||
184 | } | ||
185 | seq_printf(m, "\n"); | ||
186 | mutex_unlock(&dev->struct_mutex); | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * Called when "/proc/dri/.../vblank" is read. | ||
192 | */ | ||
193 | int drm_vblank_info(struct seq_file *m, void *data) | ||
194 | { | ||
195 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
196 | struct drm_device *dev = node->minor->dev; | ||
197 | int crtc; | ||
198 | |||
199 | mutex_lock(&dev->struct_mutex); | ||
200 | for (crtc = 0; crtc < dev->num_crtcs; crtc++) { | ||
201 | seq_printf(m, "CRTC %d enable: %d\n", | ||
202 | crtc, atomic_read(&dev->vblank_refcount[crtc])); | ||
203 | seq_printf(m, "CRTC %d counter: %d\n", | ||
204 | crtc, drm_vblank_count(dev, crtc)); | ||
205 | seq_printf(m, "CRTC %d last wait: %d\n", | ||
206 | crtc, dev->last_vblank_wait[crtc]); | ||
207 | seq_printf(m, "CRTC %d in modeset: %d\n", | ||
208 | crtc, dev->vblank_inmodeset[crtc]); | ||
209 | } | ||
210 | mutex_unlock(&dev->struct_mutex); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | /** | ||
215 | * Called when "/proc/dri/.../clients" is read. | ||
216 | * | ||
217 | */ | ||
218 | int drm_clients_info(struct seq_file *m, void *data) | ||
219 | { | ||
220 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
221 | struct drm_device *dev = node->minor->dev; | ||
222 | struct drm_file *priv; | ||
223 | |||
224 | mutex_lock(&dev->struct_mutex); | ||
225 | seq_printf(m, "a dev pid uid magic ioctls\n\n"); | ||
226 | list_for_each_entry(priv, &dev->filelist, lhead) { | ||
227 | seq_printf(m, "%c %3d %5d %5d %10u %10lu\n", | ||
228 | priv->authenticated ? 'y' : 'n', | ||
229 | priv->minor->index, | ||
230 | priv->pid, | ||
231 | priv->uid, priv->magic, priv->ioctl_count); | ||
232 | } | ||
233 | mutex_unlock(&dev->struct_mutex); | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | |||
238 | int drm_gem_one_name_info(int id, void *ptr, void *data) | ||
239 | { | ||
240 | struct drm_gem_object *obj = ptr; | ||
241 | struct seq_file *m = data; | ||
242 | |||
243 | seq_printf(m, "name %d size %zd\n", obj->name, obj->size); | ||
244 | |||
245 | seq_printf(m, "%6d %8zd %7d %8d\n", | ||
246 | obj->name, obj->size, | ||
247 | atomic_read(&obj->handlecount.refcount), | ||
248 | atomic_read(&obj->refcount.refcount)); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | int drm_gem_name_info(struct seq_file *m, void *data) | ||
253 | { | ||
254 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
255 | struct drm_device *dev = node->minor->dev; | ||
256 | |||
257 | seq_printf(m, " name size handles refcount\n"); | ||
258 | idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | int drm_gem_object_info(struct seq_file *m, void* data) | ||
263 | { | ||
264 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
265 | struct drm_device *dev = node->minor->dev; | ||
266 | |||
267 | seq_printf(m, "%d objects\n", atomic_read(&dev->object_count)); | ||
268 | seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory)); | ||
269 | seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count)); | ||
270 | seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory)); | ||
271 | seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory)); | ||
272 | seq_printf(m, "%d gtt total\n", dev->gtt_total); | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | #if DRM_DEBUG_CODE | ||
277 | |||
278 | int drm_vma_info(struct seq_file *m, void *data) | ||
279 | { | ||
280 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
281 | struct drm_device *dev = node->minor->dev; | ||
282 | struct drm_vma_entry *pt; | ||
283 | struct vm_area_struct *vma; | ||
284 | #if defined(__i386__) | ||
285 | unsigned int pgprot; | ||
286 | #endif | ||
287 | |||
288 | mutex_lock(&dev->struct_mutex); | ||
289 | seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n", | ||
290 | atomic_read(&dev->vma_count), | ||
291 | high_memory, virt_to_phys(high_memory)); | ||
292 | |||
293 | list_for_each_entry(pt, &dev->vmalist, head) { | ||
294 | vma = pt->vma; | ||
295 | if (!vma) | ||
296 | continue; | ||
297 | seq_printf(m, | ||
298 | "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", | ||
299 | pt->pid, vma->vm_start, vma->vm_end, | ||
300 | vma->vm_flags & VM_READ ? 'r' : '-', | ||
301 | vma->vm_flags & VM_WRITE ? 'w' : '-', | ||
302 | vma->vm_flags & VM_EXEC ? 'x' : '-', | ||
303 | vma->vm_flags & VM_MAYSHARE ? 's' : 'p', | ||
304 | vma->vm_flags & VM_LOCKED ? 'l' : '-', | ||
305 | vma->vm_flags & VM_IO ? 'i' : '-', | ||
306 | vma->vm_pgoff); | ||
307 | |||
308 | #if defined(__i386__) | ||
309 | pgprot = pgprot_val(vma->vm_page_prot); | ||
310 | seq_printf(m, " %c%c%c%c%c%c%c%c%c", | ||
311 | pgprot & _PAGE_PRESENT ? 'p' : '-', | ||
312 | pgprot & _PAGE_RW ? 'w' : 'r', | ||
313 | pgprot & _PAGE_USER ? 'u' : 's', | ||
314 | pgprot & _PAGE_PWT ? 't' : 'b', | ||
315 | pgprot & _PAGE_PCD ? 'u' : 'c', | ||
316 | pgprot & _PAGE_ACCESSED ? 'a' : '-', | ||
317 | pgprot & _PAGE_DIRTY ? 'd' : '-', | ||
318 | pgprot & _PAGE_PSE ? 'm' : 'k', | ||
319 | pgprot & _PAGE_GLOBAL ? 'g' : 'l'); | ||
320 | #endif | ||
321 | seq_printf(m, "\n"); | ||
322 | } | ||
323 | mutex_unlock(&dev->struct_mutex); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | #endif | ||
328 | |||
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c index 8df849f66830..9b3c5af61e98 100644 --- a/drivers/gpu/drm/drm_proc.c +++ b/drivers/gpu/drm/drm_proc.c | |||
@@ -37,697 +37,196 @@ | |||
37 | * OTHER DEALINGS IN THE SOFTWARE. | 37 | * OTHER DEALINGS IN THE SOFTWARE. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | #include <linux/seq_file.h> | ||
40 | #include "drmP.h" | 41 | #include "drmP.h" |
41 | 42 | ||
42 | static int drm_name_info(char *buf, char **start, off_t offset, | 43 | |
43 | int request, int *eof, void *data); | 44 | /*************************************************** |
44 | static int drm_vm_info(char *buf, char **start, off_t offset, | 45 | * Initialization, etc. |
45 | int request, int *eof, void *data); | 46 | **************************************************/ |
46 | static int drm_clients_info(char *buf, char **start, off_t offset, | ||
47 | int request, int *eof, void *data); | ||
48 | static int drm_queues_info(char *buf, char **start, off_t offset, | ||
49 | int request, int *eof, void *data); | ||
50 | static int drm_bufs_info(char *buf, char **start, off_t offset, | ||
51 | int request, int *eof, void *data); | ||
52 | static int drm_vblank_info(char *buf, char **start, off_t offset, | ||
53 | int request, int *eof, void *data); | ||
54 | static int drm_gem_name_info(char *buf, char **start, off_t offset, | ||
55 | int request, int *eof, void *data); | ||
56 | static int drm_gem_object_info(char *buf, char **start, off_t offset, | ||
57 | int request, int *eof, void *data); | ||
58 | #if DRM_DEBUG_CODE | ||
59 | static int drm_vma_info(char *buf, char **start, off_t offset, | ||
60 | int request, int *eof, void *data); | ||
61 | #endif | ||
62 | 47 | ||
63 | /** | 48 | /** |
64 | * Proc file list. | 49 | * Proc file list. |
65 | */ | 50 | */ |
66 | static struct drm_proc_list { | 51 | static struct drm_info_list drm_proc_list[] = { |
67 | const char *name; /**< file name */ | ||
68 | int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ | ||
69 | u32 driver_features; /**< Required driver features for this entry */ | ||
70 | } drm_proc_list[] = { | ||
71 | {"name", drm_name_info, 0}, | 52 | {"name", drm_name_info, 0}, |
72 | {"mem", drm_mem_info, 0}, | ||
73 | {"vm", drm_vm_info, 0}, | 53 | {"vm", drm_vm_info, 0}, |
74 | {"clients", drm_clients_info, 0}, | 54 | {"clients", drm_clients_info, 0}, |
75 | {"queues", drm_queues_info, 0}, | 55 | {"queues", drm_queues_info, 0}, |
76 | {"bufs", drm_bufs_info, 0}, | 56 | {"bufs", drm_bufs_info, 0}, |
77 | {"vblank", drm_vblank_info, 0}, | ||
78 | {"gem_names", drm_gem_name_info, DRIVER_GEM}, | 57 | {"gem_names", drm_gem_name_info, DRIVER_GEM}, |
79 | {"gem_objects", drm_gem_object_info, DRIVER_GEM}, | 58 | {"gem_objects", drm_gem_object_info, DRIVER_GEM}, |
80 | #if DRM_DEBUG_CODE | 59 | #if DRM_DEBUG_CODE |
81 | {"vma", drm_vma_info}, | 60 | {"vma", drm_vma_info, 0}, |
82 | #endif | 61 | #endif |
83 | }; | 62 | }; |
84 | |||
85 | #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) | 63 | #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) |
86 | 64 | ||
65 | static int drm_proc_open(struct inode *inode, struct file *file) | ||
66 | { | ||
67 | struct drm_info_node* node = PDE(inode)->data; | ||
68 | |||
69 | return single_open(file, node->info_ent->show, node); | ||
70 | } | ||
71 | |||
72 | static const struct file_operations drm_proc_fops = { | ||
73 | .owner = THIS_MODULE, | ||
74 | .open = drm_proc_open, | ||
75 | .read = seq_read, | ||
76 | .llseek = seq_lseek, | ||
77 | .release = single_release, | ||
78 | }; | ||
79 | |||
80 | |||
87 | /** | 81 | /** |
88 | * Initialize the DRI proc filesystem for a device. | 82 | * Initialize a given set of proc files for a device |
89 | * | 83 | * |
90 | * \param dev DRM device. | 84 | * \param files The array of files to create |
91 | * \param minor device minor number. | 85 | * \param count The number of files given |
92 | * \param root DRI proc dir entry. | 86 | * \param root DRI proc dir entry. |
93 | * \param dev_root resulting DRI device proc dir entry. | 87 | * \param minor device minor number |
94 | * \return root entry pointer on success, or NULL on failure. | 88 | * \return Zero on success, non-zero on failure |
95 | * | 89 | * |
96 | * Create the DRI proc root entry "/proc/dri", the device proc root entry | 90 | * Create a given set of proc files represented by an array of |
97 | * "/proc/dri/%minor%/", and each entry in proc_list as | 91 | * gdm_proc_lists in the given root directory. |
98 | * "/proc/dri/%minor%/%name%". | ||
99 | */ | 92 | */ |
100 | int drm_proc_init(struct drm_minor *minor, int minor_id, | 93 | int drm_proc_create_files(struct drm_info_list *files, int count, |
101 | struct proc_dir_entry *root) | 94 | struct proc_dir_entry *root, struct drm_minor *minor) |
102 | { | 95 | { |
103 | struct drm_device *dev = minor->dev; | 96 | struct drm_device *dev = minor->dev; |
104 | struct proc_dir_entry *ent; | 97 | struct proc_dir_entry *ent; |
105 | int i, j, ret; | 98 | struct drm_info_node *tmp; |
106 | char name[64]; | 99 | char name[64]; |
100 | int i, ret; | ||
107 | 101 | sprintf(name, "%d", minor->index); | |
108 | sprintf(name, "%d", minor_id); | 102 | for (i = 0; i < count; i++) { |
109 | minor->dev_root = proc_mkdir(name, root); | 103 | u32 features = files[i].driver_features; |
110 | if (!minor->dev_root) { | ||
111 | DRM_ERROR("Cannot create /proc/dri/%s\n", name); | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | for (i = 0; i < DRM_PROC_ENTRIES; i++) { | ||
116 | u32 features = drm_proc_list[i].driver_features; | ||
117 | 104 | ||
118 | if (features != 0 && | 105 | if (features != 0 && |
119 | (dev->driver->driver_features & features) != features) | 106 | (dev->driver->driver_features & features) != features) |
120 | continue; | 107 | continue; |
121 | 108 | ||
122 | ent = create_proc_entry(drm_proc_list[i].name, | 109 | tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER); |
123 | S_IFREG | S_IRUGO, minor->dev_root); | 110 | ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root); |
124 | if (!ent) { | 111 | if (!ent) { |
125 | DRM_ERROR("Cannot create /proc/dri/%s/%s\n", | 112 | DRM_ERROR("Cannot create /proc/dri/%s/%s\n", |
126 | name, drm_proc_list[i].name); | 113 | name, files[i].name); |
114 | drm_free(tmp, sizeof(struct drm_info_node), | ||
115 | _DRM_DRIVER); | ||
127 | ret = -1; | 116 | ret = -1; |
128 | goto fail; | 117 | goto fail; |
129 | } | 118 | } |
130 | ent->read_proc = drm_proc_list[i].f; | ||
131 | ent->data = minor; | ||
132 | } | ||
133 | 119 | ||
134 | if (dev->driver->proc_init) { | 120 | ent->proc_fops = &drm_proc_fops; |
135 | ret = dev->driver->proc_init(minor); | 121 | ent->data = tmp; |
136 | if (ret) { | 122 | tmp->minor = minor; |
137 | DRM_ERROR("DRM: Driver failed to initialize " | 123 | tmp->info_ent = &files[i]; |
138 | "/proc/dri.\n"); | 124 | list_add(&(tmp->list), &(minor->proc_nodes.list)); |
139 | goto fail; | ||
140 | } | ||
141 | } | 125 | } |
142 | |||
143 | return 0; | 126 | return 0; |
144 | fail: | ||
145 | 127 | ||
146 | for (j = 0; j < i; j++) | 128 | fail: |
147 | remove_proc_entry(drm_proc_list[i].name, | 129 | for (i = 0; i < count; i++) |
148 | minor->dev_root); | 130 | remove_proc_entry(files[i].name, minor->proc_root); |
149 | remove_proc_entry(name, root); | ||
150 | minor->dev_root = NULL; | ||
151 | return ret; | 131 | return ret; |
152 | } | 132 | } |
153 | 133 | ||
154 | /** | 134 | /** |
155 | * Cleanup the proc filesystem resources. | 135 | * Initialize the DRI proc filesystem for a device |
156 | * | 136 | * |
157 | * \param minor device minor number. | 137 | * \param dev DRM device |
138 | * \param minor device minor number | ||
158 | * \param root DRI proc dir entry. | 139 | * \param root DRI proc dir entry. |
159 | * \param dev_root DRI device proc dir entry. | 140 | * \param dev_root resulting DRI device proc dir entry. |
160 | * \return always zero. | 141 | * \return root entry pointer on success, or NULL on failure. |
161 | * | 142 | * |
162 | * Remove all proc entries created by proc_init(). | 143 | * Create the device proc root entry |
144 | * "/proc/dri/%minor%/", and each entry in proc_list as | ||
145 | * "/proc/dri/%minor%/%name%". | ||
163 | */ | 146 | */ |
164 | int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) | 147 | int drm_proc_init(struct drm_minor *minor, int minor_id, |
148 | struct proc_dir_entry *root) | ||
165 | { | 149 | { |
166 | struct drm_device *dev = minor->dev; | 150 | struct drm_device *dev = minor->dev; |
167 | int i; | ||
168 | char name[64]; | 151 | char name[64]; |
152 | int ret; | ||
169 | 153 | ||
170 | if (!root || !minor->dev_root) | 154 | INIT_LIST_HEAD(&minor->proc_nodes.list); |
171 | return 0; | 155 | sprintf(name, "%d", minor_id); |
172 | 156 | minor->proc_root = proc_mkdir(name, root); | |
173 | if (dev->driver->proc_cleanup) | 157 | if (!minor->proc_root) { |
174 | dev->driver->proc_cleanup(minor); | 158 | DRM_ERROR("Cannot create /proc/dri/%s\n", name); |
175 | 159 | return -1; | |
176 | for (i = 0; i < DRM_PROC_ENTRIES; i++) | ||
177 | remove_proc_entry(drm_proc_list[i].name, minor->dev_root); | ||
178 | sprintf(name, "%d", minor->index); | ||
179 | remove_proc_entry(name, root); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | /** | ||
185 | * Called when "/proc/dri/.../name" is read. | ||
186 | * | ||
187 | * \param buf output buffer. | ||
188 | * \param start start of output data. | ||
189 | * \param offset requested start offset. | ||
190 | * \param request requested number of bytes. | ||
191 | * \param eof whether there is no more data to return. | ||
192 | * \param data private data. | ||
193 | * \return number of written bytes. | ||
194 | * | ||
195 | * Prints the device name together with the bus id if available. | ||
196 | */ | ||
197 | static int drm_name_info(char *buf, char **start, off_t offset, int request, | ||
198 | int *eof, void *data) | ||
199 | { | ||
200 | struct drm_minor *minor = (struct drm_minor *) data; | ||
201 | struct drm_master *master = minor->master; | ||
202 | struct drm_device *dev = minor->dev; | ||
203 | int len = 0; | ||
204 | |||
205 | if (offset > DRM_PROC_LIMIT) { | ||
206 | *eof = 1; | ||
207 | return 0; | ||
208 | } | 160 | } |
209 | 161 | ||
210 | if (!master) | 162 | ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, |
211 | return 0; | 163 | minor->proc_root, minor); |
212 | 164 | if (ret) { | |
213 | *start = &buf[offset]; | 165 | remove_proc_entry(name, root); |
214 | *eof = 0; | 166 | minor->proc_root = NULL; |
215 | 167 | DRM_ERROR("Failed to create core drm proc files\n"); | |
216 | if (master->unique) { | 168 | return ret; |
217 | DRM_PROC_PRINT("%s %s %s\n", | ||
218 | dev->driver->pci_driver.name, | ||
219 | pci_name(dev->pdev), master->unique); | ||
220 | } else { | ||
221 | DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name, | ||
222 | pci_name(dev->pdev)); | ||
223 | } | 169 | } |
224 | 170 | ||
225 | if (len > request + offset) | 171 | if (dev->driver->proc_init) { |
226 | return request; | 172 | ret = dev->driver->proc_init(minor); |
227 | *eof = 1; | 173 | if (ret) { |
228 | return len - offset; | 174 | DRM_ERROR("DRM: Driver failed to initialize " |
229 | } | 175 | "/proc/dri.\n"); |
230 | 176 | return ret; | |
231 | /** | ||
232 | * Called when "/proc/dri/.../vm" is read. | ||
233 | * | ||
234 | * \param buf output buffer. | ||
235 | * \param start start of output data. | ||
236 | * \param offset requested start offset. | ||
237 | * \param request requested number of bytes. | ||
238 | * \param eof whether there is no more data to return. | ||
239 | * \param data private data. | ||
240 | * \return number of written bytes. | ||
241 | * | ||
242 | * Prints information about all mappings in drm_device::maplist. | ||
243 | */ | ||
244 | static int drm__vm_info(char *buf, char **start, off_t offset, int request, | ||
245 | int *eof, void *data) | ||
246 | { | ||
247 | struct drm_minor *minor = (struct drm_minor *) data; | ||
248 | struct drm_device *dev = minor->dev; | ||
249 | int len = 0; | ||
250 | struct drm_map *map; | ||
251 | struct drm_map_list *r_list; | ||
252 | |||
253 | /* Hardcoded from _DRM_FRAME_BUFFER, | ||
254 | _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and | ||
255 | _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ | ||
256 | const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; | ||
257 | const char *type; | ||
258 | int i; | ||
259 | |||
260 | if (offset > DRM_PROC_LIMIT) { | ||
261 | *eof = 1; | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | *start = &buf[offset]; | ||
266 | *eof = 0; | ||
267 | |||
268 | DRM_PROC_PRINT("slot offset size type flags " | ||
269 | "address mtrr\n\n"); | ||
270 | i = 0; | ||
271 | list_for_each_entry(r_list, &dev->maplist, head) { | ||
272 | map = r_list->map; | ||
273 | if (!map) | ||
274 | continue; | ||
275 | if (map->type < 0 || map->type > 5) | ||
276 | type = "??"; | ||
277 | else | ||
278 | type = types[map->type]; | ||
279 | DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", | ||
280 | i, | ||
281 | map->offset, | ||
282 | map->size, type, map->flags, | ||
283 | (unsigned long) r_list->user_token); | ||
284 | if (map->mtrr < 0) { | ||
285 | DRM_PROC_PRINT("none\n"); | ||
286 | } else { | ||
287 | DRM_PROC_PRINT("%4d\n", map->mtrr); | ||
288 | } | 177 | } |
289 | i++; | ||
290 | } | ||
291 | |||
292 | if (len > request + offset) | ||
293 | return request; | ||
294 | *eof = 1; | ||
295 | return len - offset; | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * Simply calls _vm_info() while holding the drm_device::struct_mutex lock. | ||
300 | */ | ||
301 | static int drm_vm_info(char *buf, char **start, off_t offset, int request, | ||
302 | int *eof, void *data) | ||
303 | { | ||
304 | struct drm_minor *minor = (struct drm_minor *) data; | ||
305 | struct drm_device *dev = minor->dev; | ||
306 | int ret; | ||
307 | |||
308 | mutex_lock(&dev->struct_mutex); | ||
309 | ret = drm__vm_info(buf, start, offset, request, eof, data); | ||
310 | mutex_unlock(&dev->struct_mutex); | ||
311 | return ret; | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * Called when "/proc/dri/.../queues" is read. | ||
316 | * | ||
317 | * \param buf output buffer. | ||
318 | * \param start start of output data. | ||
319 | * \param offset requested start offset. | ||
320 | * \param request requested number of bytes. | ||
321 | * \param eof whether there is no more data to return. | ||
322 | * \param data private data. | ||
323 | * \return number of written bytes. | ||
324 | */ | ||
325 | static int drm__queues_info(char *buf, char **start, off_t offset, | ||
326 | int request, int *eof, void *data) | ||
327 | { | ||
328 | struct drm_minor *minor = (struct drm_minor *) data; | ||
329 | struct drm_device *dev = minor->dev; | ||
330 | int len = 0; | ||
331 | int i; | ||
332 | struct drm_queue *q; | ||
333 | |||
334 | if (offset > DRM_PROC_LIMIT) { | ||
335 | *eof = 1; | ||
336 | return 0; | ||
337 | } | 178 | } |
338 | 179 | return 0; | |
339 | *start = &buf[offset]; | ||
340 | *eof = 0; | ||
341 | |||
342 | DRM_PROC_PRINT(" ctx/flags use fin" | ||
343 | " blk/rw/rwf wait flushed queued" | ||
344 | " locks\n\n"); | ||
345 | for (i = 0; i < dev->queue_count; i++) { | ||
346 | q = dev->queuelist[i]; | ||
347 | atomic_inc(&q->use_count); | ||
348 | DRM_PROC_PRINT_RET(atomic_dec(&q->use_count), | ||
349 | "%5d/0x%03x %5d %5d" | ||
350 | " %5d/%c%c/%c%c%c %5Zd\n", | ||
351 | i, | ||
352 | q->flags, | ||
353 | atomic_read(&q->use_count), | ||
354 | atomic_read(&q->finalization), | ||
355 | atomic_read(&q->block_count), | ||
356 | atomic_read(&q->block_read) ? 'r' : '-', | ||
357 | atomic_read(&q->block_write) ? 'w' : '-', | ||
358 | waitqueue_active(&q->read_queue) ? 'r' : '-', | ||
359 | waitqueue_active(&q-> | ||
360 | write_queue) ? 'w' : '-', | ||
361 | waitqueue_active(&q-> | ||
362 | flush_queue) ? 'f' : '-', | ||
363 | DRM_BUFCOUNT(&q->waitlist)); | ||
364 | atomic_dec(&q->use_count); | ||
365 | } | ||
366 | |||
367 | if (len > request + offset) | ||
368 | return request; | ||
369 | *eof = 1; | ||
370 | return len - offset; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * Simply calls _queues_info() while holding the drm_device::struct_mutex lock. | ||
375 | */ | ||
376 | static int drm_queues_info(char *buf, char **start, off_t offset, int request, | ||
377 | int *eof, void *data) | ||
378 | { | ||
379 | struct drm_minor *minor = (struct drm_minor *) data; | ||
380 | struct drm_device *dev = minor->dev; | ||
381 | int ret; | ||
382 | |||
383 | mutex_lock(&dev->struct_mutex); | ||
384 | ret = drm__queues_info(buf, start, offset, request, eof, data); | ||
385 | mutex_unlock(&dev->struct_mutex); | ||
386 | return ret; | ||
387 | } | 180 | } |
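
Each entry's proc_fops now points at drm_proc_fops, which is defined earlier in drm_proc.c and is not visible in this hunk. Judging from how ent->data and info_ent are used above, it plausibly reduces to single_open() glue along these lines (a sketch, not the verbatim patch):

	static int drm_proc_open(struct inode *inode, struct file *file)
	{
		struct drm_info_node *node = PDE(inode)->data;	/* stored as ent->data */

		/* single_open() stashes node in m->private for the show callback */
		return single_open(file, node->info_ent->show, node);
	}

	static const struct file_operations drm_proc_fops = {
		.owner   = THIS_MODULE,
		.open    = drm_proc_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};
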
388 | 181 | ||
389 | /** | 182 | int drm_proc_remove_files(struct drm_info_list *files, int count, |
390 | * Called when "/proc/dri/.../bufs" is read. | 183 | struct drm_minor *minor) |
391 | * | ||
392 | * \param buf output buffer. | ||
393 | * \param start start of output data. | ||
394 | * \param offset requested start offset. | ||
395 | * \param request requested number of bytes. | ||
396 | * \param eof whether there is no more data to return. | ||
397 | * \param data private data. | ||
398 | * \return number of written bytes. | ||
399 | */ | ||
400 | static int drm__bufs_info(char *buf, char **start, off_t offset, int request, | ||
401 | int *eof, void *data) | ||
402 | { | 184 | { |
403 | struct drm_minor *minor = (struct drm_minor *) data; | 185 | struct list_head *pos, *q; |
404 | struct drm_device *dev = minor->dev; | 186 | struct drm_info_node *tmp; |
405 | int len = 0; | ||
406 | struct drm_device_dma *dma = dev->dma; | ||
407 | int i; | 187 | int i; |
408 | 188 | ||
409 | if (!dma || offset > DRM_PROC_LIMIT) { | 189 | for (i = 0; i < count; i++) { |
410 | *eof = 1; | 190 | list_for_each_safe(pos, q, &minor->proc_nodes.list) { |
411 | return 0; | 191 | tmp = list_entry(pos, struct drm_info_node, list); |
412 | } | 192 | if (tmp->info_ent == &files[i]) { |
413 | 193 | remove_proc_entry(files[i].name, | |
414 | *start = &buf[offset]; | 194 | minor->proc_root); |
415 | *eof = 0; | 195 | list_del(pos); |
416 | 196 | drm_free(tmp, sizeof(struct drm_info_node), | |
417 | DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); | 197 | _DRM_DRIVER); |
418 | for (i = 0; i <= DRM_MAX_ORDER; i++) { | 198 | } |
419 | if (dma->bufs[i].buf_count) | 199 | } |
420 | DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n", | ||
421 | i, | ||
422 | dma->bufs[i].buf_size, | ||
423 | dma->bufs[i].buf_count, | ||
424 | atomic_read(&dma->bufs[i] | ||
425 | .freelist.count), | ||
426 | dma->bufs[i].seg_count, | ||
427 | dma->bufs[i].seg_count | ||
428 | * (1 << dma->bufs[i].page_order), | ||
429 | (dma->bufs[i].seg_count | ||
430 | * (1 << dma->bufs[i].page_order)) | ||
431 | * PAGE_SIZE / 1024); | ||
432 | } | ||
433 | DRM_PROC_PRINT("\n"); | ||
434 | for (i = 0; i < dma->buf_count; i++) { | ||
435 | if (i && !(i % 32)) | ||
436 | DRM_PROC_PRINT("\n"); | ||
437 | DRM_PROC_PRINT(" %d", dma->buflist[i]->list); | ||
438 | } | 200 | } |
439 | DRM_PROC_PRINT("\n"); | 201 | return 0; |
440 | |||
441 | if (len > request + offset) | ||
442 | return request; | ||
443 | *eof = 1; | ||
444 | return len - offset; | ||
445 | } | ||
446 | |||
447 | /** | ||
448 | * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock. | ||
449 | */ | ||
450 | static int drm_bufs_info(char *buf, char **start, off_t offset, int request, | ||
451 | int *eof, void *data) | ||
452 | { | ||
453 | struct drm_minor *minor = (struct drm_minor *) data; | ||
454 | struct drm_device *dev = minor->dev; | ||
455 | int ret; | ||
456 | |||
457 | mutex_lock(&dev->struct_mutex); | ||
458 | ret = drm__bufs_info(buf, start, offset, request, eof, data); | ||
459 | mutex_unlock(&dev->struct_mutex); | ||
460 | return ret; | ||
461 | } | 202 | } |
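
The removal helper walks minor->proc_nodes with list_for_each_safe() because entries are deleted mid-iteration. Driver-side teardown is symmetric with the create call sketched earlier (same hypothetical foo names):

	static void foo_proc_cleanup(struct drm_minor *minor)
	{
		drm_proc_remove_files(foo_proc_list, ARRAY_SIZE(foo_proc_list), minor);
	}
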
462 | 203 | ||
463 | /** | 204 | /** |
464 | * Called when "/proc/dri/.../vblank" is read. | 205 | * Cleanup the proc filesystem resources. |
465 | * | 206 | * |
466 | * \param buf output buffer. | 207 | * \param minor device minor number. |
467 | * \param start start of output data. | 208 | * \param root DRI proc dir entry. |
468 | * \param offset requested start offset. | 209 | * \param dev_root DRI device proc dir entry. |
469 | * \param request requested number of bytes. | 210 | * \return always zero. |
470 | * \param eof whether there is no more data to return. | ||
471 | * \param data private data. | ||
472 | * \return number of written bytes. | ||
473 | */ | ||
474 | static int drm__vblank_info(char *buf, char **start, off_t offset, int request, | ||
475 | int *eof, void *data) | ||
476 | { | ||
477 | struct drm_minor *minor = (struct drm_minor *) data; | ||
478 | struct drm_device *dev = minor->dev; | ||
479 | int len = 0; | ||
480 | int crtc; | ||
481 | |||
482 | if (offset > DRM_PROC_LIMIT) { | ||
483 | *eof = 1; | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | *start = &buf[offset]; | ||
488 | *eof = 0; | ||
489 | |||
490 | for (crtc = 0; crtc < dev->num_crtcs; crtc++) { | ||
491 | DRM_PROC_PRINT("CRTC %d enable: %d\n", | ||
492 | crtc, atomic_read(&dev->vblank_refcount[crtc])); | ||
493 | DRM_PROC_PRINT("CRTC %d counter: %d\n", | ||
494 | crtc, drm_vblank_count(dev, crtc)); | ||
495 | DRM_PROC_PRINT("CRTC %d last wait: %d\n", | ||
496 | crtc, dev->last_vblank_wait[crtc]); | ||
497 | DRM_PROC_PRINT("CRTC %d in modeset: %d\n", | ||
498 | crtc, dev->vblank_inmodeset[crtc]); | ||
499 | } | ||
500 | |||
501 | if (len > request + offset) | ||
502 | return request; | ||
503 | *eof = 1; | ||
504 | return len - offset; | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock. | ||
509 | */ | ||
510 | static int drm_vblank_info(char *buf, char **start, off_t offset, int request, | ||
511 | int *eof, void *data) | ||
512 | { | ||
513 | struct drm_minor *minor = (struct drm_minor *) data; | ||
514 | struct drm_device *dev = minor->dev; | ||
515 | int ret; | ||
516 | |||
517 | mutex_lock(&dev->struct_mutex); | ||
518 | ret = drm__vblank_info(buf, start, offset, request, eof, data); | ||
519 | mutex_unlock(&dev->struct_mutex); | ||
520 | return ret; | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * Called when "/proc/dri/.../clients" is read. | ||
525 | * | 211 | * |
526 | * \param buf output buffer. | 212 | * Remove all proc entries created by proc_init(). |
527 | * \param start start of output data. | ||
528 | * \param offset requested start offset. | ||
529 | * \param request requested number of bytes. | ||
530 | * \param eof whether there is no more data to return. | ||
531 | * \param data private data. | ||
532 | * \return number of written bytes. | ||
533 | */ | 213 | */ |
534 | static int drm__clients_info(char *buf, char **start, off_t offset, | 214 | int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) |
535 | int request, int *eof, void *data) | ||
536 | { | 215 | { |
537 | struct drm_minor *minor = (struct drm_minor *) data; | ||
538 | struct drm_device *dev = minor->dev; | 216 | struct drm_device *dev = minor->dev; |
539 | int len = 0; | 217 | char name[64]; |
540 | struct drm_file *priv; | ||
541 | 218 | ||
542 | if (offset > DRM_PROC_LIMIT) { | 219 | if (!root || !minor->proc_root) |
543 | *eof = 1; | ||
544 | return 0; | 220 | return 0; |
545 | } | ||
546 | |||
547 | *start = &buf[offset]; | ||
548 | *eof = 0; | ||
549 | |||
550 | DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n"); | ||
551 | list_for_each_entry(priv, &dev->filelist, lhead) { | ||
552 | DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n", | ||
553 | priv->authenticated ? 'y' : 'n', | ||
554 | priv->minor->index, | ||
555 | priv->pid, | ||
556 | priv->uid, priv->magic, priv->ioctl_count); | ||
557 | } | ||
558 | 221 | ||
559 | if (len > request + offset) | 222 | if (dev->driver->proc_cleanup) |
560 | return request; | 223 | dev->driver->proc_cleanup(minor); |
561 | *eof = 1; | ||
562 | return len - offset; | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * Simply calls _clients_info() while holding the drm_device::struct_mutex lock. | ||
567 | */ | ||
568 | static int drm_clients_info(char *buf, char **start, off_t offset, | ||
569 | int request, int *eof, void *data) | ||
570 | { | ||
571 | struct drm_minor *minor = (struct drm_minor *) data; | ||
572 | struct drm_device *dev = minor->dev; | ||
573 | int ret; | ||
574 | |||
575 | mutex_lock(&dev->struct_mutex); | ||
576 | ret = drm__clients_info(buf, start, offset, request, eof, data); | ||
577 | mutex_unlock(&dev->struct_mutex); | ||
578 | return ret; | ||
579 | } | ||
580 | |||
581 | struct drm_gem_name_info_data { | ||
582 | int len; | ||
583 | char *buf; | ||
584 | int eof; | ||
585 | }; | ||
586 | 224 | ||
587 | static int drm_gem_one_name_info(int id, void *ptr, void *data) | 225 | drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); |
588 | { | ||
589 | struct drm_gem_object *obj = ptr; | ||
590 | struct drm_gem_name_info_data *nid = data; | ||
591 | 226 | ||
592 | DRM_INFO("name %d size %zd\n", obj->name, obj->size); | 227 | sprintf(name, "%d", minor->index); |
593 | if (nid->eof) | 228 | remove_proc_entry(name, root); |
594 | return 0; | ||
595 | 229 | ||
596 | nid->len += sprintf(&nid->buf[nid->len], | ||
597 | "%6d %8zd %7d %8d\n", | ||
598 | obj->name, obj->size, | ||
599 | atomic_read(&obj->handlecount.refcount), | ||
600 | atomic_read(&obj->refcount.refcount)); | ||
601 | if (nid->len > DRM_PROC_LIMIT) { | ||
602 | nid->eof = 1; | ||
603 | return 0; | ||
604 | } | ||
605 | return 0; | 230 | return 0; |
606 | } | 231 | } |
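
All of the read_proc handlers removed from this file (name, vm, queues, bufs, vblank, clients, gem info) share the same buf/start/offset/eof bookkeeping; under the seq_file regime each collapses to a show callback, with seq_read() handling offsets and buffer sizing. As one illustration, the removed drm_name_info maps onto roughly the following (a sketch of the conversion, not the verbatim patch):

	static int drm_name_info(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = m->private;
		struct drm_minor *minor = node->minor;
		struct drm_device *dev = minor->dev;
		struct drm_master *master = minor->master;

		if (!master)
			return 0;

		if (master->unique)
			seq_printf(m, "%s %s %s\n", dev->driver->pci_driver.name,
				   pci_name(dev->pdev), master->unique);
		else
			seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
				   pci_name(dev->pdev));
		return 0;
	}
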
607 | 232 | ||
608 | static int drm_gem_name_info(char *buf, char **start, off_t offset, | ||
609 | int request, int *eof, void *data) | ||
610 | { | ||
611 | struct drm_minor *minor = (struct drm_minor *) data; | ||
612 | struct drm_device *dev = minor->dev; | ||
613 | struct drm_gem_name_info_data nid; | ||
614 | |||
615 | if (offset > DRM_PROC_LIMIT) { | ||
616 | *eof = 1; | ||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | nid.len = sprintf(buf, " name size handles refcount\n"); | ||
621 | nid.buf = buf; | ||
622 | nid.eof = 0; | ||
623 | idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); | ||
624 | |||
625 | *start = &buf[offset]; | ||
626 | *eof = 0; | ||
627 | if (nid.len > request + offset) | ||
628 | return request; | ||
629 | *eof = 1; | ||
630 | return nid.len - offset; | ||
631 | } | ||
632 | |||
633 | static int drm_gem_object_info(char *buf, char **start, off_t offset, | ||
634 | int request, int *eof, void *data) | ||
635 | { | ||
636 | struct drm_minor *minor = (struct drm_minor *) data; | ||
637 | struct drm_device *dev = minor->dev; | ||
638 | int len = 0; | ||
639 | |||
640 | if (offset > DRM_PROC_LIMIT) { | ||
641 | *eof = 1; | ||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | *start = &buf[offset]; | ||
646 | *eof = 0; | ||
647 | DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); | ||
648 | DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); | ||
649 | DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); | ||
650 | DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); | ||
651 | DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); | ||
652 | DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); | ||
653 | if (len > request + offset) | ||
654 | return request; | ||
655 | *eof = 1; | ||
656 | return len - offset; | ||
657 | } | ||
658 | |||
659 | #if DRM_DEBUG_CODE | ||
660 | |||
661 | static int drm__vma_info(char *buf, char **start, off_t offset, int request, | ||
662 | int *eof, void *data) | ||
663 | { | ||
664 | struct drm_minor *minor = (struct drm_minor *) data; | ||
665 | struct drm_device *dev = minor->dev; | ||
666 | int len = 0; | ||
667 | struct drm_vma_entry *pt; | ||
668 | struct vm_area_struct *vma; | ||
669 | #if defined(__i386__) | ||
670 | unsigned int pgprot; | ||
671 | #endif | ||
672 | |||
673 | if (offset > DRM_PROC_LIMIT) { | ||
674 | *eof = 1; | ||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | *start = &buf[offset]; | ||
679 | *eof = 0; | ||
680 | |||
681 | DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n", | ||
682 | atomic_read(&dev->vma_count), | ||
683 | high_memory, virt_to_phys(high_memory)); | ||
684 | list_for_each_entry(pt, &dev->vmalist, head) { | ||
685 | if (!(vma = pt->vma)) | ||
686 | continue; | ||
687 | DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", | ||
688 | pt->pid, | ||
689 | vma->vm_start, | ||
690 | vma->vm_end, | ||
691 | vma->vm_flags & VM_READ ? 'r' : '-', | ||
692 | vma->vm_flags & VM_WRITE ? 'w' : '-', | ||
693 | vma->vm_flags & VM_EXEC ? 'x' : '-', | ||
694 | vma->vm_flags & VM_MAYSHARE ? 's' : 'p', | ||
695 | vma->vm_flags & VM_LOCKED ? 'l' : '-', | ||
696 | vma->vm_flags & VM_IO ? 'i' : '-', | ||
697 | vma->vm_pgoff); | ||
698 | |||
699 | #if defined(__i386__) | ||
700 | pgprot = pgprot_val(vma->vm_page_prot); | ||
701 | DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c", | ||
702 | pgprot & _PAGE_PRESENT ? 'p' : '-', | ||
703 | pgprot & _PAGE_RW ? 'w' : 'r', | ||
704 | pgprot & _PAGE_USER ? 'u' : 's', | ||
705 | pgprot & _PAGE_PWT ? 't' : 'b', | ||
706 | pgprot & _PAGE_PCD ? 'u' : 'c', | ||
707 | pgprot & _PAGE_ACCESSED ? 'a' : '-', | ||
708 | pgprot & _PAGE_DIRTY ? 'd' : '-', | ||
709 | pgprot & _PAGE_PSE ? 'm' : 'k', | ||
710 | pgprot & _PAGE_GLOBAL ? 'g' : 'l'); | ||
711 | #endif | ||
712 | DRM_PROC_PRINT("\n"); | ||
713 | } | ||
714 | |||
715 | if (len > request + offset) | ||
716 | return request; | ||
717 | *eof = 1; | ||
718 | return len - offset; | ||
719 | } | ||
720 | |||
721 | static int drm_vma_info(char *buf, char **start, off_t offset, int request, | ||
722 | int *eof, void *data) | ||
723 | { | ||
724 | struct drm_minor *minor = (struct drm_minor *) data; | ||
725 | struct drm_device *dev = minor->dev; | ||
726 | int ret; | ||
727 | |||
728 | mutex_lock(&dev->struct_mutex); | ||
729 | ret = drm__vma_info(buf, start, offset, request, eof, data); | ||
730 | mutex_unlock(&dev->struct_mutex); | ||
731 | return ret; | ||
732 | } | ||
733 | #endif | ||
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 7c8b15b22bf2..48f33be8fd0f 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -50,6 +50,7 @@ struct idr drm_minors_idr; | |||
50 | 50 | ||
51 | struct class *drm_class; | 51 | struct class *drm_class; |
52 | struct proc_dir_entry *drm_proc_root; | 52 | struct proc_dir_entry *drm_proc_root; |
53 | struct dentry *drm_debugfs_root; | ||
53 | 54 | ||
54 | static int drm_minor_get_id(struct drm_device *dev, int type) | 55 | static int drm_minor_get_id(struct drm_device *dev, int type) |
55 | { | 56 | { |
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t | |||
313 | goto err_mem; | 314 | goto err_mem; |
314 | } | 315 | } |
315 | } else | 316 | } else |
316 | new_minor->dev_root = NULL; | 317 | new_minor->proc_root = NULL; |
318 | |||
319 | #if defined(CONFIG_DEBUG_FS) | ||
320 | ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); | ||
321 | if (ret) { | ||
322 | DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n"); | ||
323 | goto err_g2; | ||
324 | } | ||
325 | #endif | ||
317 | 326 | ||
318 | ret = drm_sysfs_device_add(new_minor); | 327 | ret = drm_sysfs_device_add(new_minor); |
319 | if (ret) { | 328 | if (ret) { |
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p) | |||
451 | 460 | ||
452 | if (minor->type == DRM_MINOR_LEGACY) | 461 | if (minor->type == DRM_MINOR_LEGACY) |
453 | drm_proc_cleanup(minor, drm_proc_root); | 462 | drm_proc_cleanup(minor, drm_proc_root); |
463 | #if defined(CONFIG_DEBUG_FS) | ||
464 | drm_debugfs_cleanup(minor); | ||
465 | #endif | ||
466 | |||
454 | drm_sysfs_device_remove(minor); | 467 | drm_sysfs_device_remove(minor); |
455 | 468 | ||
456 | idr_remove(&drm_minors_idr, minor->index); | 469 | idr_remove(&drm_minors_idr, minor->index); |
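
drm_debugfs_init() and drm_debugfs_cleanup() live in the new drm_debugfs.c, which is not part of this excerpt; judging from the call sites here and the proc helpers above, the init path can be sketched as follows (names such as drm_debugfs_create_files, drm_debugfs_list, DRM_DEBUGFS_ENTRIES, and minor->debugfs_root are assumptions):

	int drm_debugfs_init(struct drm_minor *minor, int minor_id,
			     struct dentry *root)
	{
		struct drm_device *dev = minor->dev;
		char name[64];
		int ret;

		INIT_LIST_HEAD(&minor->debugfs_nodes.list);
		sprintf(name, "%d", minor_id);
		minor->debugfs_root = debugfs_create_dir(name, root);
		if (!minor->debugfs_root) {
			DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
			return -1;
		}

		ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
					       minor->debugfs_root, minor);
		if (ret) {
			debugfs_remove(minor->debugfs_root);
			minor->debugfs_root = NULL;
			DRM_ERROR("Failed to create core drm debugfs files\n");
			return ret;
		}

		/* driver hook, analogous to dev->driver->proc_init above */
		if (dev->driver->debugfs_init)
			ret = dev->driver->debugfs_init(minor);
		return ret;
	}
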
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 793cba39d832..51c5a050aa73 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
7 | i915_suspend.o \ | 7 | i915_suspend.o \ |
8 | i915_gem.o \ | 8 | i915_gem.o \ |
9 | i915_gem_debug.o \ | 9 | i915_gem_debug.o \ |
10 | i915_gem_proc.o \ | 10 | i915_gem_debugfs.o \ |
11 | i915_gem_tiling.o \ | 11 | i915_gem_tiling.o \ |
12 | intel_display.o \ | 12 | intel_display.o \ |
13 | intel_crt.o \ | 13 | intel_crt.o \ |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 6d21b9e48b89..a818b377e1f7 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -41,7 +41,6 @@ | |||
41 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | 41 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
42 | { | 42 | { |
43 | drm_i915_private_t *dev_priv = dev->dev_private; | 43 | drm_i915_private_t *dev_priv = dev->dev_private; |
44 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | ||
45 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | 44 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
46 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; | 45 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; |
47 | u32 last_acthd = I915_READ(acthd_reg); | 46 | u32 last_acthd = I915_READ(acthd_reg); |
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) | |||
58 | if (ring->space >= n) | 57 | if (ring->space >= n) |
59 | return 0; | 58 | return 0; |
60 | 59 | ||
61 | if (master_priv->sarea_priv) | 60 | if (dev->primary->master) { |
62 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | 61 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
62 | if (master_priv->sarea_priv) | ||
63 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | ||
64 | } | ||
65 | |||
63 | 66 | ||
64 | if (ring->head != last_head) | 67 | if (ring->head != last_head) |
65 | i = 0; | 68 | i = 0; |
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd) | |||
356 | return ret; | 359 | return ret; |
357 | } | 360 | } |
358 | 361 | ||
359 | static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) | 362 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) |
360 | { | 363 | { |
361 | drm_i915_private_t *dev_priv = dev->dev_private; | 364 | drm_i915_private_t *dev_priv = dev->dev_private; |
362 | int i; | 365 | int i; |
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | |||
370 | for (i = 0; i < dwords;) { | 373 | for (i = 0; i < dwords;) { |
371 | int cmd, sz; | 374 | int cmd, sz; |
372 | 375 | ||
373 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) | 376 | cmd = buffer[i]; |
374 | return -EINVAL; | ||
375 | 377 | ||
376 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) | 378 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) |
377 | return -EINVAL; | 379 | return -EINVAL; |
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | |||
379 | OUT_RING(cmd); | 381 | OUT_RING(cmd); |
380 | 382 | ||
381 | while (++i, --sz) { | 383 | while (++i, --sz) { |
382 | if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], | 384 | OUT_RING(buffer[i]); |
383 | sizeof(cmd))) { | ||
384 | return -EINVAL; | ||
385 | } | ||
386 | OUT_RING(cmd); | ||
387 | } | 385 | } |
388 | } | 386 | } |
389 | 387 | ||
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | |||
397 | 395 | ||
398 | int | 396 | int |
399 | i915_emit_box(struct drm_device *dev, | 397 | i915_emit_box(struct drm_device *dev, |
400 | struct drm_clip_rect __user *boxes, | 398 | struct drm_clip_rect *boxes, |
401 | int i, int DR1, int DR4) | 399 | int i, int DR1, int DR4) |
402 | { | 400 | { |
403 | drm_i915_private_t *dev_priv = dev->dev_private; | 401 | drm_i915_private_t *dev_priv = dev->dev_private; |
404 | struct drm_clip_rect box; | 402 | struct drm_clip_rect box = boxes[i]; |
405 | RING_LOCALS; | 403 | RING_LOCALS; |
406 | 404 | ||
407 | if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { | ||
408 | return -EFAULT; | ||
409 | } | ||
410 | |||
411 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { | 405 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { |
412 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | 406 | DRM_ERROR("Bad box %d,%d..%d,%d\n", |
413 | box.x1, box.y1, box.x2, box.y2); | 407 | box.x1, box.y1, box.x2, box.y2); |
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev) | |||
460 | } | 454 | } |
461 | 455 | ||
462 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, | 456 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, |
463 | drm_i915_cmdbuffer_t * cmd) | 457 | drm_i915_cmdbuffer_t *cmd, |
458 | struct drm_clip_rect *cliprects, | ||
459 | void *cmdbuf) | ||
464 | { | 460 | { |
465 | int nbox = cmd->num_cliprects; | 461 | int nbox = cmd->num_cliprects; |
466 | int i = 0, count, ret; | 462 | int i = 0, count, ret; |
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
476 | 472 | ||
477 | for (i = 0; i < count; i++) { | 473 | for (i = 0; i < count; i++) { |
478 | if (i < nbox) { | 474 | if (i < nbox) { |
479 | ret = i915_emit_box(dev, cmd->cliprects, i, | 475 | ret = i915_emit_box(dev, cliprects, i, |
480 | cmd->DR1, cmd->DR4); | 476 | cmd->DR1, cmd->DR4); |
481 | if (ret) | 477 | if (ret) |
482 | return ret; | 478 | return ret; |
483 | } | 479 | } |
484 | 480 | ||
485 | ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); | 481 | ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); |
486 | if (ret) | 482 | if (ret) |
487 | return ret; | 483 | return ret; |
488 | } | 484 | } |
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, | |||
492 | } | 488 | } |
493 | 489 | ||
494 | static int i915_dispatch_batchbuffer(struct drm_device * dev, | 490 | static int i915_dispatch_batchbuffer(struct drm_device * dev, |
495 | drm_i915_batchbuffer_t * batch) | 491 | drm_i915_batchbuffer_t * batch, |
492 | struct drm_clip_rect *cliprects) | ||
496 | { | 493 | { |
497 | drm_i915_private_t *dev_priv = dev->dev_private; | 494 | drm_i915_private_t *dev_priv = dev->dev_private; |
498 | struct drm_clip_rect __user *boxes = batch->cliprects; | ||
499 | int nbox = batch->num_cliprects; | 495 | int nbox = batch->num_cliprects; |
500 | int i = 0, count; | 496 | int i = 0, count; |
501 | RING_LOCALS; | 497 | RING_LOCALS; |
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, | |||
511 | 507 | ||
512 | for (i = 0; i < count; i++) { | 508 | for (i = 0; i < count; i++) { |
513 | if (i < nbox) { | 509 | if (i < nbox) { |
514 | int ret = i915_emit_box(dev, boxes, i, | 510 | int ret = i915_emit_box(dev, cliprects, i, |
515 | batch->DR1, batch->DR4); | 511 | batch->DR1, batch->DR4); |
516 | if (ret) | 512 | if (ret) |
517 | return ret; | 513 | return ret; |
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, | |||
626 | master_priv->sarea_priv; | 622 | master_priv->sarea_priv; |
627 | drm_i915_batchbuffer_t *batch = data; | 623 | drm_i915_batchbuffer_t *batch = data; |
628 | int ret; | 624 | int ret; |
625 | struct drm_clip_rect *cliprects = NULL; | ||
629 | 626 | ||
630 | if (!dev_priv->allow_batchbuffer) { | 627 | if (!dev_priv->allow_batchbuffer) { |
631 | DRM_ERROR("Batchbuffer ioctl disabled\n"); | 628 | DRM_ERROR("Batchbuffer ioctl disabled\n"); |
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, | |||
637 | 634 | ||
638 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | 635 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
639 | 636 | ||
640 | if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, | 637 | if (batch->num_cliprects < 0) |
641 | batch->num_cliprects * | 638 | return -EINVAL; |
642 | sizeof(struct drm_clip_rect))) | 639 | |
643 | return -EFAULT; | 640 | if (batch->num_cliprects) { |
641 | cliprects = drm_calloc(batch->num_cliprects, | ||
642 | sizeof(struct drm_clip_rect), | ||
643 | DRM_MEM_DRIVER); | ||
644 | if (cliprects == NULL) | ||
645 | return -ENOMEM; | ||
646 | |||
647 | ret = copy_from_user(cliprects, batch->cliprects, | ||
648 | batch->num_cliprects * | ||
649 | sizeof(struct drm_clip_rect)); | ||
650 | if (ret != 0) | ||
651 | goto fail_free; | ||
652 | } | ||
644 | 653 | ||
645 | mutex_lock(&dev->struct_mutex); | 654 | mutex_lock(&dev->struct_mutex); |
646 | ret = i915_dispatch_batchbuffer(dev, batch); | 655 | ret = i915_dispatch_batchbuffer(dev, batch, cliprects); |
647 | mutex_unlock(&dev->struct_mutex); | 656 | mutex_unlock(&dev->struct_mutex); |
648 | 657 | ||
649 | if (sarea_priv) | 658 | if (sarea_priv) |
650 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | 659 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
660 | |||
661 | fail_free: | ||
662 | drm_free(cliprects, | ||
663 | batch->num_cliprects * sizeof(struct drm_clip_rect), | ||
664 | DRM_MEM_DRIVER); | ||
665 | |||
651 | return ret; | 666 | return ret; |
652 | } | 667 | } |
653 | 668 | ||
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
659 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) | 674 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
660 | master_priv->sarea_priv; | 675 | master_priv->sarea_priv; |
661 | drm_i915_cmdbuffer_t *cmdbuf = data; | 676 | drm_i915_cmdbuffer_t *cmdbuf = data; |
677 | struct drm_clip_rect *cliprects = NULL; | ||
678 | void *batch_data; | ||
662 | int ret; | 679 | int ret; |
663 | 680 | ||
664 | DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", | 681 | DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", |
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, | |||
666 | 683 | ||
667 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | 684 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
668 | 685 | ||
669 | if (cmdbuf->num_cliprects && | 686 | if (cmdbuf->num_cliprects < 0) |
670 | DRM_VERIFYAREA_READ(cmdbuf->cliprects, | 687 | return -EINVAL; |
671 | cmdbuf->num_cliprects * | 688 | |
672 | sizeof(struct drm_clip_rect))) { | 689 | batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER); |
673 | DRM_ERROR("Fault accessing cliprects\n"); | 690 | if (batch_data == NULL) |
674 | return -EFAULT; | 691 | return -ENOMEM; |
692 | |||
693 | ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); | ||
694 | if (ret != 0) | ||
695 | goto fail_batch_free; | ||
696 | |||
697 | if (cmdbuf->num_cliprects) { | ||
698 | cliprects = drm_calloc(cmdbuf->num_cliprects, | ||
699 | sizeof(struct drm_clip_rect), | ||
700 | DRM_MEM_DRIVER); | ||
701 | if (cliprects == NULL) | ||
702 | goto fail_batch_free; | ||
703 | |||
704 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | ||
705 | cmdbuf->num_cliprects * | ||
706 | sizeof(struct drm_clip_rect)); | ||
707 | if (ret != 0) | ||
708 | goto fail_clip_free; | ||
675 | } | 709 | } |
676 | 710 | ||
677 | mutex_lock(&dev->struct_mutex); | 711 | mutex_lock(&dev->struct_mutex); |
678 | ret = i915_dispatch_cmdbuffer(dev, cmdbuf); | 712 | ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); |
679 | mutex_unlock(&dev->struct_mutex); | 713 | mutex_unlock(&dev->struct_mutex); |
680 | if (ret) { | 714 | if (ret) { |
681 | DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); | 715 | DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); |
682 | return ret; | 716 | goto fail_batch_free; |
683 | } | 717 | } |
684 | 718 | ||
685 | if (sarea_priv) | 719 | if (sarea_priv) |
686 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | 720 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
687 | return 0; | 721 | |
722 | fail_batch_free: | ||
723 | drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER); | ||
724 | fail_clip_free: | ||
725 | drm_free(cliprects, | ||
726 | cmdbuf->num_cliprects * sizeof(struct drm_clip_rect), | ||
727 | DRM_MEM_DRIVER); | ||
728 | |||
729 | return ret; | ||
688 | } | 730 | } |
689 | 731 | ||
690 | static int i915_flip_bufs(struct drm_device *dev, void *data, | 732 | static int i915_flip_bufs(struct drm_device *dev, void *data, |
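
Two error-path quirks are visible in the cmdbuffer hunk above: when drm_calloc() fails, ret still holds the zero returned by the earlier copy_from_user(), so the ioctl reports success on an allocation failure; and because fail_batch_free precedes fail_clip_free, the goto fail_clip_free taken on a cliprect copy fault skips the batch_data free. A corrected exit sequence might read (sketch; elided parts unchanged from the hunk):

	if (cmdbuf->num_cliprects) {
		cliprects = drm_calloc(cmdbuf->num_cliprects,
				       sizeof(struct drm_clip_rect),
				       DRM_MEM_DRIVER);
		if (cliprects == NULL) {
			ret = -ENOMEM;		/* don't return a stale 0 */
			goto fail_batch_free;
		}
		/* ... copy_from_user() and dispatch as in the hunk ... */
	}
	/* ... */
fail_clip_free:					/* a cliprect fault now frees both */
	drm_free(cliprects,
		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
		 DRM_MEM_DRIVER);
fail_batch_free:
	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
	return ret;
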
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b293ef0bae71..dcb91f5df6e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -150,8 +150,10 @@ static struct drm_driver driver = { | |||
150 | .get_reg_ofs = drm_core_get_reg_ofs, | 150 | .get_reg_ofs = drm_core_get_reg_ofs, |
151 | .master_create = i915_master_create, | 151 | .master_create = i915_master_create, |
152 | .master_destroy = i915_master_destroy, | 152 | .master_destroy = i915_master_destroy, |
153 | .proc_init = i915_gem_proc_init, | 153 | #if defined(CONFIG_DEBUG_FS) |
154 | .proc_cleanup = i915_gem_proc_cleanup, | 154 | .debugfs_init = i915_gem_debugfs_init, |
155 | .debugfs_cleanup = i915_gem_debugfs_cleanup, | ||
156 | #endif | ||
155 | .gem_init_object = i915_gem_init_object, | 157 | .gem_init_object = i915_gem_init_object, |
156 | .gem_free_object = i915_gem_free_object, | 158 | .gem_free_object = i915_gem_free_object, |
157 | .gem_vm_ops = &i915_gem_vm_ops, | 159 | .gem_vm_ops = &i915_gem_vm_ops, |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d6cc9861e0a1..c1685d0c704f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -404,7 +404,8 @@ struct drm_i915_gem_object { | |||
404 | /** AGP memory structure for our GTT binding. */ | 404 | /** AGP memory structure for our GTT binding. */ |
405 | DRM_AGP_MEM *agp_mem; | 405 | DRM_AGP_MEM *agp_mem; |
406 | 406 | ||
407 | struct page **page_list; | 407 | struct page **pages; |
408 | int pages_refcount; | ||
408 | 409 | ||
409 | /** | 410 | /** |
410 | * Current offset of the object in GTT space. | 411 | * Current offset of the object in GTT space. |
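
The rename from page_list to pages comes with a pages_refcount, so the pread/pwrite paths added in this series and the GTT binding code can share one lookup of the shmem backing pages. The get/put pairing in i915_gem.c works roughly like this (a sketch; i915_gem_object_read_pages is a hypothetical stand-in for the population logic):

	static int
	i915_gem_object_get_pages(struct drm_gem_object *obj)
	{
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		if (obj_priv->pages_refcount++ != 0)
			return 0;	/* already populated; just took a reference */

		/* first user: look up and cache the shmem pages in obj_priv->pages */
		return i915_gem_object_read_pages(obj);
	}

	static void
	i915_gem_object_put_pages(struct drm_gem_object *obj)
	{
		struct drm_i915_gem_object *obj_priv = obj->driver_private;

		if (--obj_priv->pages_refcount != 0)
			return;		/* other users still hold the pages */

		/* last user: drop the cached page references and free the array */
	}
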
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); | |||
519 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | 520 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
520 | unsigned long arg); | 521 | unsigned long arg); |
521 | extern int i915_emit_box(struct drm_device *dev, | 522 | extern int i915_emit_box(struct drm_device *dev, |
522 | struct drm_clip_rect __user *boxes, | 523 | struct drm_clip_rect *boxes, |
523 | int i, int DR1, int DR4); | 524 | int i, int DR1, int DR4); |
524 | 525 | ||
525 | /* i915_irq.c */ | 526 | /* i915_irq.c */ |
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data, | |||
604 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | 605 | int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
605 | struct drm_file *file_priv); | 606 | struct drm_file *file_priv); |
606 | void i915_gem_load(struct drm_device *dev); | 607 | void i915_gem_load(struct drm_device *dev); |
607 | int i915_gem_proc_init(struct drm_minor *minor); | ||
608 | void i915_gem_proc_cleanup(struct drm_minor *minor); | ||
609 | int i915_gem_init_object(struct drm_gem_object *obj); | 608 | int i915_gem_init_object(struct drm_gem_object *obj); |
610 | void i915_gem_free_object(struct drm_gem_object *obj); | 609 | void i915_gem_free_object(struct drm_gem_object *obj); |
611 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | 610 | int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); |
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len, | |||
649 | const char *where, uint32_t mark); | 648 | const char *where, uint32_t mark); |
650 | void i915_dump_lru(struct drm_device *dev, const char *where); | 649 | void i915_dump_lru(struct drm_device *dev, const char *where); |
651 | 650 | ||
651 | /* i915_debugfs.c */ | ||
652 | int i915_gem_debugfs_init(struct drm_minor *minor); | ||
653 | void i915_gem_debugfs_cleanup(struct drm_minor *minor); | ||
654 | |||
652 | /* i915_suspend.c */ | 655 | /* i915_suspend.c */ |
653 | extern int i915_save_state(struct drm_device *dev); | 656 | extern int i915_save_state(struct drm_device *dev); |
654 | extern int i915_restore_state(struct drm_device *dev); | 657 | extern int i915_restore_state(struct drm_device *dev); |
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | |||
784 | (dev)->pci_device == 0x2E22 || \ | 787 | (dev)->pci_device == 0x2E22 || \ |
785 | IS_GM45(dev)) | 788 | IS_GM45(dev)) |
786 | 789 | ||
790 | #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) | ||
791 | #define IS_IGDGM(dev) ((dev)->pci_device == 0xa011) | ||
792 | #define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev)) | ||
793 | |||
787 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | 794 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ |
788 | (dev)->pci_device == 0x29B2 || \ | 795 | (dev)->pci_device == 0x29B2 || \ |
789 | (dev)->pci_device == 0x29D2) | 796 | (dev)->pci_device == 0x29D2 || \ |
797 | (IS_IGD(dev))) | ||
790 | 798 | ||
791 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ | 799 | #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ |
792 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) | 800 | IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) |
793 | 801 | ||
794 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ | 802 | #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ |
795 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) | 803 | IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ |
804 | IS_IGD(dev)) | ||
796 | 805 | ||
797 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) | 806 | #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) |
798 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte | 807 | /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 37427e4016cb..b52cba0f16d2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
43 | uint64_t offset, | 43 | uint64_t offset, |
44 | uint64_t size); | 44 | uint64_t size); |
45 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); | 45 | static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); |
46 | static int i915_gem_object_get_page_list(struct drm_gem_object *obj); | 46 | static int i915_gem_object_get_pages(struct drm_gem_object *obj); |
47 | static void i915_gem_object_free_page_list(struct drm_gem_object *obj); | 47 | static void i915_gem_object_put_pages(struct drm_gem_object *obj); |
48 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | 48 | static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); |
49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, | 49 | static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, |
50 | unsigned alignment); | 50 | unsigned alignment); |
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, | |||
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline int | ||
140 | fast_shmem_read(struct page **pages, | ||
141 | loff_t page_base, int page_offset, | ||
142 | char __user *data, | ||
143 | int length) | ||
144 | { | ||
145 | 	char *vaddr; | ||
146 | int ret; | ||
147 | |||
148 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | ||
149 | if (vaddr == NULL) | ||
150 | return -ENOMEM; | ||
151 | ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); | ||
152 | kunmap_atomic(vaddr, KM_USER0); | ||
153 | |||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static inline int | ||
158 | slow_shmem_copy(struct page *dst_page, | ||
159 | int dst_offset, | ||
160 | struct page *src_page, | ||
161 | int src_offset, | ||
162 | int length) | ||
163 | { | ||
164 | char *dst_vaddr, *src_vaddr; | ||
165 | |||
166 | dst_vaddr = kmap_atomic(dst_page, KM_USER0); | ||
167 | if (dst_vaddr == NULL) | ||
168 | return -ENOMEM; | ||
169 | |||
170 | src_vaddr = kmap_atomic(src_page, KM_USER1); | ||
171 | if (src_vaddr == NULL) { | ||
172 | kunmap_atomic(dst_vaddr, KM_USER0); | ||
173 | return -ENOMEM; | ||
174 | } | ||
175 | |||
176 | memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); | ||
177 | |||
178 | kunmap_atomic(src_vaddr, KM_USER1); | ||
179 | kunmap_atomic(dst_vaddr, KM_USER0); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
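
slow_shmem_copy() holds two atomic kmaps simultaneously, which is why it passes distinct slots: with the kmap_atomic() API of this era each KM_* constant names a per-CPU fixmap entry, so reusing KM_USER0 for the source would remap, and lose, the destination. The core of the function, annotated:

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	src_vaddr = kmap_atomic(src_page, KM_USER1);	/* distinct slot from KM_USER0 */

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);		/* unmapped in reverse nesting order */
	kunmap_atomic(dst_vaddr, KM_USER0);
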
183 | |||
184 | /** | ||
185 | * This is the fast shmem pread path, which attempts to copy_from_user directly | ||
186 | * from the backing pages of the object to the user's address space. On a | ||
187 | * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). | ||
188 | */ | ||
189 | static int | ||
190 | i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | ||
191 | struct drm_i915_gem_pread *args, | ||
192 | struct drm_file *file_priv) | ||
193 | { | ||
194 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
195 | ssize_t remain; | ||
196 | loff_t offset, page_base; | ||
197 | char __user *user_data; | ||
198 | int page_offset, page_length; | ||
199 | int ret; | ||
200 | |||
201 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
202 | remain = args->size; | ||
203 | |||
204 | mutex_lock(&dev->struct_mutex); | ||
205 | |||
206 | ret = i915_gem_object_get_pages(obj); | ||
207 | if (ret != 0) | ||
208 | goto fail_unlock; | ||
209 | |||
210 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | ||
211 | args->size); | ||
212 | if (ret != 0) | ||
213 | goto fail_put_pages; | ||
214 | |||
215 | obj_priv = obj->driver_private; | ||
216 | offset = args->offset; | ||
217 | |||
218 | while (remain > 0) { | ||
219 | /* Operation in this page | ||
220 | * | ||
221 | * page_base = page offset within aperture | ||
222 | * page_offset = offset within page | ||
223 | * page_length = bytes to copy for this page | ||
224 | */ | ||
225 | page_base = (offset & ~(PAGE_SIZE-1)); | ||
226 | page_offset = offset & (PAGE_SIZE-1); | ||
227 | page_length = remain; | ||
228 | if ((page_offset + remain) > PAGE_SIZE) | ||
229 | page_length = PAGE_SIZE - page_offset; | ||
230 | |||
231 | ret = fast_shmem_read(obj_priv->pages, | ||
232 | page_base, page_offset, | ||
233 | user_data, page_length); | ||
234 | if (ret) | ||
235 | goto fail_put_pages; | ||
236 | |||
237 | remain -= page_length; | ||
238 | user_data += page_length; | ||
239 | offset += page_length; | ||
240 | } | ||
241 | |||
242 | fail_put_pages: | ||
243 | i915_gem_object_put_pages(obj); | ||
244 | fail_unlock: | ||
245 | mutex_unlock(&dev->struct_mutex); | ||
246 | |||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * This is the fallback shmem pread path: it pins the user pages with | ||
252 | * get_user_pages() up front, then copies between them and the object's | ||
253 | * backing pages with kmap_atomic() while struct_mutex is held, so no | ||
254 | * page faults can be taken with the lock held. | ||
255 | */ | ||
256 | static int | ||
257 | i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, | ||
258 | struct drm_i915_gem_pread *args, | ||
259 | struct drm_file *file_priv) | ||
260 | { | ||
261 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
262 | struct mm_struct *mm = current->mm; | ||
263 | struct page **user_pages; | ||
264 | ssize_t remain; | ||
265 | loff_t offset, pinned_pages, i; | ||
266 | loff_t first_data_page, last_data_page, num_pages; | ||
267 | int shmem_page_index, shmem_page_offset; | ||
268 | int data_page_index, data_page_offset; | ||
269 | int page_length; | ||
270 | int ret; | ||
271 | uint64_t data_ptr = args->data_ptr; | ||
272 | |||
273 | remain = args->size; | ||
274 | |||
275 | /* Pin the user pages containing the data. We can't fault while | ||
276 | * holding the struct mutex, yet we want to hold it while | ||
277 | * dereferencing the user data. | ||
278 | */ | ||
279 | first_data_page = data_ptr / PAGE_SIZE; | ||
280 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | ||
281 | num_pages = last_data_page - first_data_page + 1; | ||
282 | |||
283 | user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); | ||
284 | if (user_pages == NULL) | ||
285 | return -ENOMEM; | ||
286 | |||
287 | down_read(&mm->mmap_sem); | ||
288 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | ||
289 | 				      num_pages, 1, 0, user_pages, NULL); | ||
290 | up_read(&mm->mmap_sem); | ||
291 | if (pinned_pages < num_pages) { | ||
292 | ret = -EFAULT; | ||
293 | goto fail_put_user_pages; | ||
294 | } | ||
295 | |||
296 | mutex_lock(&dev->struct_mutex); | ||
297 | |||
298 | ret = i915_gem_object_get_pages(obj); | ||
299 | if (ret != 0) | ||
300 | goto fail_unlock; | ||
301 | |||
302 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | ||
303 | args->size); | ||
304 | if (ret != 0) | ||
305 | goto fail_put_pages; | ||
306 | |||
307 | obj_priv = obj->driver_private; | ||
308 | offset = args->offset; | ||
309 | |||
310 | while (remain > 0) { | ||
311 | /* Operation in this page | ||
312 | * | ||
313 | * shmem_page_index = page number within shmem file | ||
314 | * shmem_page_offset = offset within page in shmem file | ||
315 | * data_page_index = page number in get_user_pages return | ||
316 | * data_page_offset = offset within data_page_index page. | ||
317 | * page_length = bytes to copy for this page | ||
318 | */ | ||
319 | shmem_page_index = offset / PAGE_SIZE; | ||
320 | shmem_page_offset = offset & ~PAGE_MASK; | ||
321 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | ||
322 | data_page_offset = data_ptr & ~PAGE_MASK; | ||
323 | |||
324 | page_length = remain; | ||
325 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | ||
326 | page_length = PAGE_SIZE - shmem_page_offset; | ||
327 | if ((data_page_offset + page_length) > PAGE_SIZE) | ||
328 | page_length = PAGE_SIZE - data_page_offset; | ||
329 | |||
330 | ret = slow_shmem_copy(user_pages[data_page_index], | ||
331 | data_page_offset, | ||
332 | obj_priv->pages[shmem_page_index], | ||
333 | shmem_page_offset, | ||
334 | page_length); | ||
335 | if (ret) | ||
336 | goto fail_put_pages; | ||
337 | |||
338 | remain -= page_length; | ||
339 | data_ptr += page_length; | ||
340 | offset += page_length; | ||
341 | } | ||
342 | |||
343 | fail_put_pages: | ||
344 | i915_gem_object_put_pages(obj); | ||
345 | fail_unlock: | ||
346 | mutex_unlock(&dev->struct_mutex); | ||
347 | fail_put_user_pages: | ||
348 | for (i = 0; i < pinned_pages; i++) { | ||
349 | SetPageDirty(user_pages[i]); | ||
350 | page_cache_release(user_pages[i]); | ||
351 | } | ||
352 | kfree(user_pages); | ||
353 | |||
354 | return ret; | ||
355 | } | ||
356 | |||
139 | /** | 357 | /** |
140 | * Reads data from the object referenced by handle. | 358 | * Reads data from the object referenced by handle. |
141 | * | 359 | * |
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
148 | struct drm_i915_gem_pread *args = data; | 366 | struct drm_i915_gem_pread *args = data; |
149 | struct drm_gem_object *obj; | 367 | struct drm_gem_object *obj; |
150 | struct drm_i915_gem_object *obj_priv; | 368 | struct drm_i915_gem_object *obj_priv; |
151 | ssize_t read; | ||
152 | loff_t offset; | ||
153 | int ret; | 369 | int ret; |
154 | 370 | ||
155 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 371 | obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
167 | return -EINVAL; | 383 | return -EINVAL; |
168 | } | 384 | } |
169 | 385 | ||
170 | mutex_lock(&dev->struct_mutex); | 386 | ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); |
171 | 387 | if (ret != 0) | |
172 | ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, | 388 | ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); |
173 | args->size); | ||
174 | if (ret != 0) { | ||
175 | drm_gem_object_unreference(obj); | ||
176 | mutex_unlock(&dev->struct_mutex); | ||
177 | return ret; | ||
178 | } | ||
179 | |||
180 | offset = args->offset; | ||
181 | |||
182 | read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, | ||
183 | args->size, &offset); | ||
184 | if (read != args->size) { | ||
185 | drm_gem_object_unreference(obj); | ||
186 | mutex_unlock(&dev->struct_mutex); | ||
187 | if (read < 0) | ||
188 | return read; | ||
189 | else | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | 389 | ||
193 | drm_gem_object_unreference(obj); | 390 | drm_gem_object_unreference(obj); |
194 | mutex_unlock(&dev->struct_mutex); | ||
195 | 391 | ||
196 | return 0; | 392 | return ret; |
197 | } | 393 | } |
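
The ioctl contract is unchanged by the fast/slow split: userspace still supplies a drm_i915_gem_pread and gets data copied to data_ptr. A minimal caller for reference (field names per i915_drm.h; plain ioctl() is used here, where real code would use libdrm's EINTR-restarting wrapper):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"

	/* Read `size` bytes at `offset` from GEM object `handle` into `dst`. */
	static int gem_pread(int fd, uint32_t handle, void *dst,
			     uint64_t offset, uint64_t size)
	{
		struct drm_i915_gem_pread pread;

		memset(&pread, 0, sizeof(pread));
		pread.handle = handle;
		pread.offset = offset;
		pread.size = size;
		pread.data_ptr = (uintptr_t)dst;

		return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
	}
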
198 | 394 | ||
199 | /* This is the fast write path which cannot handle | 395 | /* This is the fast write path which cannot handle |
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping, | |||
223 | */ | 419 | */ |
224 | 420 | ||
225 | static inline int | 421 | static inline int |
226 | slow_user_write(struct io_mapping *mapping, | 422 | slow_kernel_write(struct io_mapping *mapping, |
227 | loff_t page_base, int page_offset, | 423 | loff_t gtt_base, int gtt_offset, |
228 | char __user *user_data, | 424 | struct page *user_page, int user_offset, |
229 | int length) | 425 | int length) |
230 | { | 426 | { |
231 | char __iomem *vaddr; | 427 | char *src_vaddr, *dst_vaddr; |
232 | unsigned long unwritten; | 428 | unsigned long unwritten; |
233 | 429 | ||
234 | vaddr = io_mapping_map_wc(mapping, page_base); | 430 | dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); |
235 | if (vaddr == NULL) | 431 | src_vaddr = kmap_atomic(user_page, KM_USER1); |
236 | return -EFAULT; | 432 | unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, |
237 | unwritten = __copy_from_user(vaddr + page_offset, | 433 | src_vaddr + user_offset, |
238 | user_data, length); | 434 | length); |
239 | io_mapping_unmap(vaddr); | 435 | kunmap_atomic(src_vaddr, KM_USER1); |
436 | io_mapping_unmap_atomic(dst_vaddr); | ||
240 | if (unwritten) | 437 | if (unwritten) |
241 | return -EFAULT; | 438 | return -EFAULT; |
242 | return 0; | 439 | return 0; |
243 | } | 440 | } |
244 | 441 | ||
442 | static inline int | ||
443 | fast_shmem_write(struct page **pages, | ||
444 | loff_t page_base, int page_offset, | ||
445 | char __user *data, | ||
446 | int length) | ||
447 | { | ||
448 | 	char *vaddr; | ||
449 | |||
450 | vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); | ||
451 | if (vaddr == NULL) | ||
452 | return -ENOMEM; | ||
453 | __copy_from_user_inatomic(vaddr + page_offset, data, length); | ||
454 | kunmap_atomic(vaddr, KM_USER0); | ||
455 | |||
456 | return 0; | ||
457 | } | ||
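
Note the asymmetry between the two shmem helpers: fast_shmem_read() above returns the residue of __copy_to_user_inatomic() so its caller can drop to the slow path, while fast_shmem_write() discards the result of __copy_from_user_inatomic() and always reports success, so a faulting write silently truncates. A stricter variant would propagate the fault:

	static inline int
	fast_shmem_write(struct page **pages,
			 loff_t page_base, int page_offset,
			 char __user *data,
			 int length)
	{
		char *vaddr;
		unsigned long unwritten;

		vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
		if (vaddr == NULL)
			return -ENOMEM;
		unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
		kunmap_atomic(vaddr, KM_USER0);

		return unwritten ? -EFAULT : 0;	/* let the caller take a slow path */
	}
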
458 | |||
459 | /** | ||
460 | * This is the fast pwrite path, where we copy the data directly from the | ||
461 | * user into the GTT, uncached. | ||
462 | */ | ||
245 | static int | 463 | static int |
246 | i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 464 | i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, |
247 | struct drm_i915_gem_pwrite *args, | 465 | struct drm_i915_gem_pwrite *args, |
248 | struct drm_file *file_priv) | 466 | struct drm_file *file_priv) |
249 | { | 467 | { |
250 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 468 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
251 | drm_i915_private_t *dev_priv = dev->dev_private; | 469 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
273 | 491 | ||
274 | obj_priv = obj->driver_private; | 492 | obj_priv = obj->driver_private; |
275 | offset = obj_priv->gtt_offset + args->offset; | 493 | offset = obj_priv->gtt_offset + args->offset; |
276 | obj_priv->dirty = 1; | ||
277 | 494 | ||
278 | while (remain > 0) { | 495 | while (remain > 0) { |
279 | /* Operation in this page | 496 | /* Operation in this page |
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
292 | page_offset, user_data, page_length); | 509 | page_offset, user_data, page_length); |
293 | 510 | ||
294 | /* If we get a fault while copying data, then (presumably) our | 511 | /* If we get a fault while copying data, then (presumably) our |
295 | * source page isn't available. In this case, use the | 512 | * source page isn't available. Return the error and we'll |
296 | * non-atomic function | 513 | * retry in the slow path. |
297 | */ | 514 | */ |
298 | if (ret) { | 515 | if (ret) |
299 | ret = slow_user_write (dev_priv->mm.gtt_mapping, | 516 | goto fail; |
300 | page_base, page_offset, | ||
301 | user_data, page_length); | ||
302 | if (ret) | ||
303 | goto fail; | ||
304 | } | ||
305 | 517 | ||
306 | remain -= page_length; | 518 | remain -= page_length; |
307 | user_data += page_length; | 519 | user_data += page_length; |
@@ -315,39 +527,284 @@ fail: | |||
315 | return ret; | 527 | return ret; |
316 | } | 528 | } |
317 | 529 | ||
530 | /** | ||
531 | * This is the fallback GTT pwrite path, which uses get_user_pages to pin | ||
532 | * the memory and maps it using kmap_atomic for copying. | ||
533 | * | ||
534 | * This code resulted in x11perf -rgb10text consuming about 10% more CPU | ||
535 | * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). | ||
536 | */ | ||
318 | static int | 537 | static int |
319 | i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | 538 | i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, |
320 | struct drm_i915_gem_pwrite *args, | 539 | struct drm_i915_gem_pwrite *args, |
321 | struct drm_file *file_priv) | 540 | struct drm_file *file_priv) |
322 | { | 541 | { |
542 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
543 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
544 | ssize_t remain; | ||
545 | loff_t gtt_page_base, offset; | ||
546 | loff_t first_data_page, last_data_page, num_pages; | ||
547 | loff_t pinned_pages, i; | ||
548 | struct page **user_pages; | ||
549 | struct mm_struct *mm = current->mm; | ||
550 | int gtt_page_offset, data_page_offset, data_page_index, page_length; | ||
323 | int ret; | 551 | int ret; |
324 | loff_t offset; | 552 | uint64_t data_ptr = args->data_ptr; |
325 | ssize_t written; | 553 | |
554 | remain = args->size; | ||
555 | |||
556 | /* Pin the user pages containing the data. We can't fault while | ||
557 | * holding the struct mutex, and all of the pwrite implementations | ||
558 | * want to hold it while dereferencing the user data. | ||
559 | */ | ||
560 | first_data_page = data_ptr / PAGE_SIZE; | ||
561 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | ||
562 | num_pages = last_data_page - first_data_page + 1; | ||
563 | |||
564 | user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); | ||
565 | if (user_pages == NULL) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | down_read(&mm->mmap_sem); | ||
569 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | ||
570 | num_pages, 0, 0, user_pages, NULL); | ||
571 | up_read(&mm->mmap_sem); | ||
572 | if (pinned_pages < num_pages) { | ||
573 | ret = -EFAULT; | ||
574 | goto out_unpin_pages; | ||
575 | } | ||
326 | 576 | ||
327 | mutex_lock(&dev->struct_mutex); | 577 | mutex_lock(&dev->struct_mutex); |
578 | ret = i915_gem_object_pin(obj, 0); | ||
579 | if (ret) | ||
580 | goto out_unlock; | ||
581 | |||
582 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
583 | if (ret) | ||
584 | goto out_unpin_object; | ||
585 | |||
586 | obj_priv = obj->driver_private; | ||
587 | offset = obj_priv->gtt_offset + args->offset; | ||
588 | |||
589 | while (remain > 0) { | ||
590 | /* Operation in this page | ||
591 | * | ||
592 | * gtt_page_base = page offset within aperture | ||
593 | * gtt_page_offset = offset within page in aperture | ||
594 | * data_page_index = page number in get_user_pages return | ||
595 | * data_page_offset = offset within data_page_index page. | ||
596 | * page_length = bytes to copy for this page | ||
597 | */ | ||
598 | gtt_page_base = offset & PAGE_MASK; | ||
599 | gtt_page_offset = offset & ~PAGE_MASK; | ||
600 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | ||
601 | data_page_offset = data_ptr & ~PAGE_MASK; | ||
602 | |||
603 | page_length = remain; | ||
604 | if ((gtt_page_offset + page_length) > PAGE_SIZE) | ||
605 | page_length = PAGE_SIZE - gtt_page_offset; | ||
606 | if ((data_page_offset + page_length) > PAGE_SIZE) | ||
607 | page_length = PAGE_SIZE - data_page_offset; | ||
608 | |||
609 | ret = slow_kernel_write(dev_priv->mm.gtt_mapping, | ||
610 | gtt_page_base, gtt_page_offset, | ||
611 | user_pages[data_page_index], | ||
612 | data_page_offset, | ||
613 | page_length); | ||
614 | |||
615 | /* If we get a fault while copying data, then (presumably) our |||
616 | * source page isn't available. This is already the fallback |||
617 | * path, so there is no slower path left to retry in. |||
618 | */ |||
619 | if (ret) | ||
620 | goto out_unpin_object; | ||
621 | |||
622 | remain -= page_length; | ||
623 | offset += page_length; | ||
624 | data_ptr += page_length; | ||
625 | } | ||
626 | |||
627 | out_unpin_object: | ||
628 | i915_gem_object_unpin(obj); | ||
629 | out_unlock: | ||
630 | mutex_unlock(&dev->struct_mutex); | ||
631 | out_unpin_pages: | ||
632 | for (i = 0; i < pinned_pages; i++) | ||
633 | page_cache_release(user_pages[i]); | ||
634 | kfree(user_pages); | ||
635 | |||
636 | return ret; | ||
637 | } | ||
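The structure of the function above, condensed (hypothetical names, error paths trimmed): user pages are pinned before struct_mutex is taken, because the GEM fault paths also take struct_mutex, so faulting on the user buffer under the lock could deadlock.

	static int pwrite_slow_skeleton(struct mm_struct *mm, unsigned long uaddr,
					int num_pages, struct page **pages)
	{
		int pinned, i;

		down_read(&mm->mmap_sem);
		pinned = get_user_pages(current, mm, uaddr, num_pages,
					0 /* not writing to them */, 0,
					pages, NULL);
		up_read(&mm->mmap_sem);

		if (pinned == num_pages) {
			/* take struct_mutex, copy page by page via
			 * slow_kernel_write(); no faults can occur now,
			 * then drop struct_mutex.
			 */
		}

		for (i = 0; i < pinned; i++)
			page_cache_release(pages[i]);
		return pinned == num_pages ? 0 : -EFAULT;
	}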
638 | |||
639 | /** | ||
640 | * This is the fast shmem pwrite path, which attempts to directly | ||
641 | * copy_from_user into the kmapped pages backing the object. | ||
642 | */ | ||
643 | static int | ||
644 | i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | ||
645 | struct drm_i915_gem_pwrite *args, | ||
646 | struct drm_file *file_priv) | ||
647 | { | ||
648 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
649 | ssize_t remain; | ||
650 | loff_t offset, page_base; | ||
651 | char __user *user_data; | ||
652 | int page_offset, page_length; | ||
653 | int ret; | ||
654 | |||
655 | user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
656 | remain = args->size; | ||
657 | |||
658 | mutex_lock(&dev->struct_mutex); | ||
659 | |||
660 | ret = i915_gem_object_get_pages(obj); | ||
661 | if (ret != 0) | ||
662 | goto fail_unlock; | ||
328 | 663 | ||
329 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 664 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
330 | if (ret) { | 665 | if (ret != 0) |
331 | mutex_unlock(&dev->struct_mutex); | 666 | goto fail_put_pages; |
332 | return ret; | 667 | |
668 | obj_priv = obj->driver_private; | ||
669 | offset = args->offset; | ||
670 | obj_priv->dirty = 1; | ||
671 | |||
672 | while (remain > 0) { | ||
673 | /* Operation in this page | ||
674 | * | ||
675 | * page_base = page offset within aperture | ||
676 | * page_offset = offset within page | ||
677 | * page_length = bytes to copy for this page | ||
678 | */ | ||
679 | page_base = (offset & ~(PAGE_SIZE-1)); | ||
680 | page_offset = offset & (PAGE_SIZE-1); | ||
681 | page_length = remain; | ||
682 | if ((page_offset + remain) > PAGE_SIZE) | ||
683 | page_length = PAGE_SIZE - page_offset; | ||
684 | |||
685 | ret = fast_shmem_write(obj_priv->pages, | ||
686 | page_base, page_offset, | ||
687 | user_data, page_length); | ||
688 | if (ret) | ||
689 | goto fail_put_pages; | ||
690 | |||
691 | remain -= page_length; | ||
692 | user_data += page_length; | ||
693 | offset += page_length; | ||
333 | } | 694 | } |
334 | 695 | ||
696 | fail_put_pages: | ||
697 | i915_gem_object_put_pages(obj); | ||
698 | fail_unlock: | ||
699 | mutex_unlock(&dev->struct_mutex); | ||
700 | |||
701 | return ret; | ||
702 | } | ||
703 | |||
704 | /** | ||
705 | * This is the fallback shmem pwrite path, which uses get_user_pages to pin | ||
706 | * the memory and maps it using kmap_atomic for copying. | ||
707 | * | ||
708 | * This avoids taking mmap_sem for faulting on the user's address while the | ||
709 | * struct_mutex is held. | ||
710 | */ | ||
711 | static int | ||
712 | i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, | ||
713 | struct drm_i915_gem_pwrite *args, | ||
714 | struct drm_file *file_priv) | ||
715 | { | ||
716 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
717 | struct mm_struct *mm = current->mm; | ||
718 | struct page **user_pages; | ||
719 | ssize_t remain; | ||
720 | loff_t offset, pinned_pages, i; | ||
721 | loff_t first_data_page, last_data_page, num_pages; | ||
722 | int shmem_page_index, shmem_page_offset; | ||
723 | int data_page_index, data_page_offset; | ||
724 | int page_length; | ||
725 | int ret; | ||
726 | uint64_t data_ptr = args->data_ptr; | ||
727 | |||
728 | remain = args->size; | ||
729 | |||
730 | /* Pin the user pages containing the data. We can't fault while | ||
731 | * holding the struct mutex, and all of the pwrite implementations | ||
732 | * want to hold it while dereferencing the user data. | ||
733 | */ | ||
734 | first_data_page = data_ptr / PAGE_SIZE; | ||
735 | last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; | ||
736 | num_pages = last_data_page - first_data_page + 1; | ||
737 | |||
738 | user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL); | ||
739 | if (user_pages == NULL) | ||
740 | return -ENOMEM; | ||
741 | |||
742 | down_read(&mm->mmap_sem); | ||
743 | pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, | ||
744 | num_pages, 0, 0, user_pages, NULL); | ||
745 | up_read(&mm->mmap_sem); | ||
746 | if (pinned_pages < num_pages) { | ||
747 | ret = -EFAULT; | ||
748 | goto fail_put_user_pages; | ||
749 | } | ||
750 | |||
751 | mutex_lock(&dev->struct_mutex); | ||
752 | |||
753 | ret = i915_gem_object_get_pages(obj); | ||
754 | if (ret != 0) | ||
755 | goto fail_unlock; | ||
756 | |||
757 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | ||
758 | if (ret != 0) | ||
759 | goto fail_put_pages; | ||
760 | |||
761 | obj_priv = obj->driver_private; | ||
335 | offset = args->offset; | 762 | offset = args->offset; |
763 | obj_priv->dirty = 1; | ||
336 | 764 | ||
337 | written = vfs_write(obj->filp, | 765 | while (remain > 0) { |
338 | (char __user *)(uintptr_t) args->data_ptr, | 766 | /* Operation in this page |
339 | args->size, &offset); | 767 | * |
340 | if (written != args->size) { | 768 | * shmem_page_index = page number within shmem file |
341 | mutex_unlock(&dev->struct_mutex); | 769 | * shmem_page_offset = offset within page in shmem file |
342 | if (written < 0) | 770 | * data_page_index = page number in get_user_pages return |
343 | return written; | 771 | * data_page_offset = offset with data_page_index page. |
344 | else | 772 | * page_length = bytes to copy for this page |
345 | return -EINVAL; | 773 | */ |
774 | shmem_page_index = offset / PAGE_SIZE; | ||
775 | shmem_page_offset = offset & ~PAGE_MASK; | ||
776 | data_page_index = data_ptr / PAGE_SIZE - first_data_page; | ||
777 | * data_page_offset = offset within data_page_index page. | ||
778 | |||
779 | page_length = remain; | ||
780 | if ((shmem_page_offset + page_length) > PAGE_SIZE) | ||
781 | page_length = PAGE_SIZE - shmem_page_offset; | ||
782 | if ((data_page_offset + page_length) > PAGE_SIZE) | ||
783 | page_length = PAGE_SIZE - data_page_offset; | ||
784 | |||
785 | ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], | ||
786 | shmem_page_offset, | ||
787 | user_pages[data_page_index], | ||
788 | data_page_offset, | ||
789 | page_length); | ||
790 | if (ret) | ||
791 | goto fail_put_pages; | ||
792 | |||
793 | remain -= page_length; | ||
794 | data_ptr += page_length; | ||
795 | offset += page_length; | ||
346 | } | 796 | } |
347 | 797 | ||
798 | fail_put_pages: | ||
799 | i915_gem_object_put_pages(obj); | ||
800 | fail_unlock: | ||
348 | mutex_unlock(&dev->struct_mutex); | 801 | mutex_unlock(&dev->struct_mutex); |
802 | fail_put_user_pages: | ||
803 | for (i = 0; i < pinned_pages; i++) | ||
804 | page_cache_release(user_pages[i]); | ||
805 | kfree(user_pages); | ||
349 | 806 | ||
350 | return 0; | 807 | return ret; |
351 | } | 808 | } |
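slow_shmem_copy() is added elsewhere in this patch and is not visible in this hunk. Judging from the call site above, it presumably kmaps both pages atomically and memcpys between them, roughly:

	/* Sketch reconstructed from the call site above; an assumption,
	 * not the patch's literal implementation.
	 */
	static inline int
	slow_shmem_copy(struct page *dst_page, int dst_offset,
			struct page *src_page, int src_offset,
			int length)
	{
		char *dst_vaddr, *src_vaddr;

		dst_vaddr = kmap_atomic(dst_page, KM_USER0);
		src_vaddr = kmap_atomic(src_page, KM_USER1);
		memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
		kunmap_atomic(src_vaddr, KM_USER1);
		kunmap_atomic(dst_vaddr, KM_USER0);

		return 0;
	}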
352 | 809 | ||
353 | /** | 810 | /** |
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
388 | if (obj_priv->phys_obj) | 845 | if (obj_priv->phys_obj) |
389 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); | 846 | ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); |
390 | else if (obj_priv->tiling_mode == I915_TILING_NONE && | 847 | else if (obj_priv->tiling_mode == I915_TILING_NONE && |
391 | dev->gtt_total != 0) | 848 | dev->gtt_total != 0) { |
392 | ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); | 849 | ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); |
393 | else | 850 | if (ret == -EFAULT) { |
394 | ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); | 851 | ret = i915_gem_gtt_pwrite_slow(dev, obj, args, |
852 | file_priv); | ||
853 | } | ||
854 | } else { | ||
855 | ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); | ||
856 | if (ret == -EFAULT) { | ||
857 | ret = i915_gem_shmem_pwrite_slow(dev, obj, args, | ||
858 | file_priv); | ||
859 | } | ||
860 | } | ||
395 | 861 | ||
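The dispatch above is the heart of the change: the atomic fast path runs first, and only -EFAULT (user memory not resident while atomic copies were attempted) selects the pinning fallback; any other error is final. Distilled into a hypothetical wrapper for the GTT pair:

	static int pwrite_dispatch(struct drm_device *dev,
				   struct drm_gem_object *obj,
				   struct drm_i915_gem_pwrite *args,
				   struct drm_file *file_priv)
	{
		int ret;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		return ret;
	}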
396 | #if WATCH_PWRITE | 862 | #if WATCH_PWRITE |
397 | if (ret) | 863 | if (ret) |
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |||
816 | } | 1282 | } |
817 | 1283 | ||
818 | static void | 1284 | static void |
819 | i915_gem_object_free_page_list(struct drm_gem_object *obj) | 1285 | i915_gem_object_put_pages(struct drm_gem_object *obj) |
820 | { | 1286 | { |
821 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1287 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
822 | int page_count = obj->size / PAGE_SIZE; | 1288 | int page_count = obj->size / PAGE_SIZE; |
823 | int i; | 1289 | int i; |
824 | 1290 | ||
825 | if (obj_priv->page_list == NULL) | 1291 | BUG_ON(obj_priv->pages_refcount == 0); |
826 | return; | ||
827 | 1292 | ||
1293 | if (--obj_priv->pages_refcount != 0) | ||
1294 | return; | ||
828 | 1295 | ||
829 | for (i = 0; i < page_count; i++) | 1296 | for (i = 0; i < page_count; i++) |
830 | if (obj_priv->page_list[i] != NULL) { | 1297 | if (obj_priv->pages[i] != NULL) { |
831 | if (obj_priv->dirty) | 1298 | if (obj_priv->dirty) |
832 | set_page_dirty(obj_priv->page_list[i]); | 1299 | set_page_dirty(obj_priv->pages[i]); |
833 | mark_page_accessed(obj_priv->page_list[i]); | 1300 | mark_page_accessed(obj_priv->pages[i]); |
834 | page_cache_release(obj_priv->page_list[i]); | 1301 | page_cache_release(obj_priv->pages[i]); |
835 | } | 1302 | } |
836 | obj_priv->dirty = 0; | 1303 | obj_priv->dirty = 0; |
837 | 1304 | ||
838 | drm_free(obj_priv->page_list, | 1305 | drm_free(obj_priv->pages, |
839 | page_count * sizeof(struct page *), | 1306 | page_count * sizeof(struct page *), |
840 | DRM_MEM_DRIVER); | 1307 | DRM_MEM_DRIVER); |
841 | obj_priv->page_list = NULL; | 1308 | obj_priv->pages = NULL; |
842 | } | 1309 | } |
843 | 1310 | ||
844 | static void | 1311 | static void |
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
1290 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | 1757 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) |
1291 | i915_gem_clear_fence_reg(obj); | 1758 | i915_gem_clear_fence_reg(obj); |
1292 | 1759 | ||
1293 | i915_gem_object_free_page_list(obj); | 1760 | i915_gem_object_put_pages(obj); |
1294 | 1761 | ||
1295 | if (obj_priv->gtt_space) { | 1762 | if (obj_priv->gtt_space) { |
1296 | atomic_dec(&dev->gtt_count); | 1763 | atomic_dec(&dev->gtt_count); |
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
1409 | } | 1876 | } |
1410 | 1877 | ||
1411 | static int | 1878 | static int |
1412 | i915_gem_object_get_page_list(struct drm_gem_object *obj) | 1879 | i915_gem_object_get_pages(struct drm_gem_object *obj) |
1413 | { | 1880 | { |
1414 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 1881 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
1415 | int page_count, i; | 1882 | int page_count, i; |
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) | |||
1418 | struct page *page; | 1885 | struct page *page; |
1419 | int ret; | 1886 | int ret; |
1420 | 1887 | ||
1421 | if (obj_priv->page_list) | 1888 | if (obj_priv->pages_refcount++ != 0) |
1422 | return 0; | 1889 | return 0; |
1423 | 1890 | ||
1424 | /* Get the list of pages out of our struct file. They'll be pinned | 1891 | /* Get the list of pages out of our struct file. They'll be pinned |
1425 | * at this point until we release them. | 1892 | * at this point until we release them. |
1426 | */ | 1893 | */ |
1427 | page_count = obj->size / PAGE_SIZE; | 1894 | page_count = obj->size / PAGE_SIZE; |
1428 | BUG_ON(obj_priv->page_list != NULL); | 1895 | BUG_ON(obj_priv->pages != NULL); |
1429 | obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), | 1896 | obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), |
1430 | DRM_MEM_DRIVER); | 1897 | DRM_MEM_DRIVER); |
1431 | if (obj_priv->page_list == NULL) { | 1898 | if (obj_priv->pages == NULL) { |
1432 | DRM_ERROR("Faled to allocate page list\n"); | 1899 | DRM_ERROR("Faled to allocate page list\n"); |
1900 | obj_priv->pages_refcount--; | ||
1433 | return -ENOMEM; | 1901 | return -ENOMEM; |
1434 | } | 1902 | } |
1435 | 1903 | ||
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) | |||
1440 | if (IS_ERR(page)) { | 1908 | if (IS_ERR(page)) { |
1441 | ret = PTR_ERR(page); | 1909 | ret = PTR_ERR(page); |
1442 | DRM_ERROR("read_mapping_page failed: %d\n", ret); | 1910 | DRM_ERROR("read_mapping_page failed: %d\n", ret); |
1443 | i915_gem_object_free_page_list(obj); | 1911 | i915_gem_object_put_pages(obj); |
1444 | return ret; | 1912 | return ret; |
1445 | } | 1913 | } |
1446 | obj_priv->page_list[i] = page; | 1914 | obj_priv->pages[i] = page; |
1447 | } | 1915 | } |
1448 | return 0; | 1916 | return 0; |
1449 | } | 1917 | } |
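The get/put renaming reflects that the page list is now refcounted: pwrite, GTT binding, and the phys-object paths can hold the pages concurrently. The idiom in miniature (hypothetical types, i915 details stripped):

	struct demo_obj {
		struct page **pages;
		int pages_refcount;
	};

	static int demo_get_pages(struct demo_obj *o, int page_count)
	{
		if (o->pages_refcount++ != 0)
			return 0;		/* already populated */
		o->pages = kcalloc(page_count, sizeof(*o->pages), GFP_KERNEL);
		if (o->pages == NULL) {
			o->pages_refcount--;	/* undo on failure */
			return -ENOMEM;
		}
		/* ... fill o->pages[] from the backing store ... */
		return 0;
	}

	static void demo_put_pages(struct demo_obj *o)
	{
		BUG_ON(o->pages_refcount == 0);
		if (--o->pages_refcount != 0)
			return;			/* other holders remain */
		kfree(o->pages);
		o->pages = NULL;
	}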
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
1766 | DRM_INFO("Binding object of size %d at 0x%08x\n", | 2234 | DRM_INFO("Binding object of size %d at 0x%08x\n", |
1767 | obj->size, obj_priv->gtt_offset); | 2235 | obj->size, obj_priv->gtt_offset); |
1768 | #endif | 2236 | #endif |
1769 | ret = i915_gem_object_get_page_list(obj); | 2237 | ret = i915_gem_object_get_pages(obj); |
1770 | if (ret) { | 2238 | if (ret) { |
1771 | drm_mm_put_block(obj_priv->gtt_space); | 2239 | drm_mm_put_block(obj_priv->gtt_space); |
1772 | obj_priv->gtt_space = NULL; | 2240 | obj_priv->gtt_space = NULL; |
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
1778 | * into the GTT. | 2246 | * into the GTT. |
1779 | */ | 2247 | */ |
1780 | obj_priv->agp_mem = drm_agp_bind_pages(dev, | 2248 | obj_priv->agp_mem = drm_agp_bind_pages(dev, |
1781 | obj_priv->page_list, | 2249 | obj_priv->pages, |
1782 | page_count, | 2250 | page_count, |
1783 | obj_priv->gtt_offset, | 2251 | obj_priv->gtt_offset, |
1784 | obj_priv->agp_type); | 2252 | obj_priv->agp_type); |
1785 | if (obj_priv->agp_mem == NULL) { | 2253 | if (obj_priv->agp_mem == NULL) { |
1786 | i915_gem_object_free_page_list(obj); | 2254 | i915_gem_object_put_pages(obj); |
1787 | drm_mm_put_block(obj_priv->gtt_space); | 2255 | drm_mm_put_block(obj_priv->gtt_space); |
1788 | obj_priv->gtt_space = NULL; | 2256 | obj_priv->gtt_space = NULL; |
1789 | return -ENOMEM; | 2257 | return -ENOMEM; |
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj) | |||
1810 | * to GPU, and we can ignore the cache flush because it'll happen | 2278 | * to GPU, and we can ignore the cache flush because it'll happen |
1811 | * again at bind time. | 2279 | * again at bind time. |
1812 | */ | 2280 | */ |
1813 | if (obj_priv->page_list == NULL) | 2281 | if (obj_priv->pages == NULL) |
1814 | return; | 2282 | return; |
1815 | 2283 | ||
1816 | drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); | 2284 | drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); |
1817 | } | 2285 | } |
1818 | 2286 | ||
1819 | /** Flushes any GPU write domain for the object if it's dirty. */ | 2287 | /** Flushes any GPU write domain for the object if it's dirty. */ |
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
1913 | static int | 2381 | static int |
1914 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | 2382 | i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) |
1915 | { | 2383 | { |
1916 | struct drm_device *dev = obj->dev; | ||
1917 | int ret; | 2384 | int ret; |
1918 | 2385 | ||
1919 | i915_gem_object_flush_gpu_write_domain(obj); | 2386 | i915_gem_object_flush_gpu_write_domain(obj); |
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) | |||
1932 | /* Flush the CPU cache if it's still invalid. */ | 2399 | /* Flush the CPU cache if it's still invalid. */ |
1933 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { | 2400 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
1934 | i915_gem_clflush_object(obj); | 2401 | i915_gem_clflush_object(obj); |
1935 | drm_agp_chipset_flush(dev); | ||
1936 | 2402 | ||
1937 | obj->read_domains |= I915_GEM_DOMAIN_CPU; | 2403 | obj->read_domains |= I915_GEM_DOMAIN_CPU; |
1938 | } | 2404 | } |
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) | |||
2144 | static void | 2610 | static void |
2145 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | 2611 | i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) |
2146 | { | 2612 | { |
2147 | struct drm_device *dev = obj->dev; | ||
2148 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2613 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2149 | 2614 | ||
2150 | if (!obj_priv->page_cpu_valid) | 2615 | if (!obj_priv->page_cpu_valid) |
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) | |||
2158 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { | 2623 | for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { |
2159 | if (obj_priv->page_cpu_valid[i]) | 2624 | if (obj_priv->page_cpu_valid[i]) |
2160 | continue; | 2625 | continue; |
2161 | drm_clflush_pages(obj_priv->page_list + i, 1); | 2626 | drm_clflush_pages(obj_priv->pages + i, 1); |
2162 | } | 2627 | } |
2163 | drm_agp_chipset_flush(dev); | ||
2164 | } | 2628 | } |
2165 | 2629 | ||
2166 | /* Free the page_cpu_valid mappings which are now stale, whether | 2630 | /* Free the page_cpu_valid mappings which are now stale, whether |
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2224 | if (obj_priv->page_cpu_valid[i]) | 2688 | if (obj_priv->page_cpu_valid[i]) |
2225 | continue; | 2689 | continue; |
2226 | 2690 | ||
2227 | drm_clflush_pages(obj_priv->page_list + i, 1); | 2691 | drm_clflush_pages(obj_priv->pages + i, 1); |
2228 | 2692 | ||
2229 | obj_priv->page_cpu_valid[i] = 1; | 2693 | obj_priv->page_cpu_valid[i] = 1; |
2230 | } | 2694 | } |
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
2245 | static int | 2709 | static int |
2246 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 2710 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, |
2247 | struct drm_file *file_priv, | 2711 | struct drm_file *file_priv, |
2248 | struct drm_i915_gem_exec_object *entry) | 2712 | struct drm_i915_gem_exec_object *entry, |
2713 | struct drm_i915_gem_relocation_entry *relocs) | ||
2249 | { | 2714 | { |
2250 | struct drm_device *dev = obj->dev; | 2715 | struct drm_device *dev = obj->dev; |
2251 | drm_i915_private_t *dev_priv = dev->dev_private; | 2716 | drm_i915_private_t *dev_priv = dev->dev_private; |
2252 | struct drm_i915_gem_relocation_entry reloc; | ||
2253 | struct drm_i915_gem_relocation_entry __user *relocs; | ||
2254 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2717 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2255 | int i, ret; | 2718 | int i, ret; |
2256 | void __iomem *reloc_page; | 2719 | void __iomem *reloc_page; |
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2262 | 2725 | ||
2263 | entry->offset = obj_priv->gtt_offset; | 2726 | entry->offset = obj_priv->gtt_offset; |
2264 | 2727 | ||
2265 | relocs = (struct drm_i915_gem_relocation_entry __user *) | ||
2266 | (uintptr_t) entry->relocs_ptr; | ||
2267 | /* Apply the relocations, using the GTT aperture to avoid cache | 2728 | /* Apply the relocations, using the GTT aperture to avoid cache |
2268 | * flushing requirements. | 2729 | * flushing requirements. |
2269 | */ | 2730 | */ |
2270 | for (i = 0; i < entry->relocation_count; i++) { | 2731 | for (i = 0; i < entry->relocation_count; i++) { |
2732 | struct drm_i915_gem_relocation_entry *reloc = &relocs[i]; | ||
2271 | struct drm_gem_object *target_obj; | 2733 | struct drm_gem_object *target_obj; |
2272 | struct drm_i915_gem_object *target_obj_priv; | 2734 | struct drm_i915_gem_object *target_obj_priv; |
2273 | uint32_t reloc_val, reloc_offset; | 2735 | uint32_t reloc_val, reloc_offset; |
2274 | uint32_t __iomem *reloc_entry; | 2736 | uint32_t __iomem *reloc_entry; |
2275 | 2737 | ||
2276 | ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); | ||
2277 | if (ret != 0) { | ||
2278 | i915_gem_object_unpin(obj); | ||
2279 | return ret; | ||
2280 | } | ||
2281 | |||
2282 | target_obj = drm_gem_object_lookup(obj->dev, file_priv, | 2738 | target_obj = drm_gem_object_lookup(obj->dev, file_priv, |
2283 | reloc.target_handle); | 2739 | reloc->target_handle); |
2284 | if (target_obj == NULL) { | 2740 | if (target_obj == NULL) { |
2285 | i915_gem_object_unpin(obj); | 2741 | i915_gem_object_unpin(obj); |
2286 | return -EBADF; | 2742 | return -EBADF; |
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2292 | */ | 2748 | */ |
2293 | if (target_obj_priv->gtt_space == NULL) { | 2749 | if (target_obj_priv->gtt_space == NULL) { |
2294 | DRM_ERROR("No GTT space found for object %d\n", | 2750 | DRM_ERROR("No GTT space found for object %d\n", |
2295 | reloc.target_handle); | 2751 | reloc->target_handle); |
2296 | drm_gem_object_unreference(target_obj); | 2752 | drm_gem_object_unreference(target_obj); |
2297 | i915_gem_object_unpin(obj); | 2753 | i915_gem_object_unpin(obj); |
2298 | return -EINVAL; | 2754 | return -EINVAL; |
2299 | } | 2755 | } |
2300 | 2756 | ||
2301 | if (reloc.offset > obj->size - 4) { | 2757 | if (reloc->offset > obj->size - 4) { |
2302 | DRM_ERROR("Relocation beyond object bounds: " | 2758 | DRM_ERROR("Relocation beyond object bounds: " |
2303 | "obj %p target %d offset %d size %d.\n", | 2759 | "obj %p target %d offset %d size %d.\n", |
2304 | obj, reloc.target_handle, | 2760 | obj, reloc->target_handle, |
2305 | (int) reloc.offset, (int) obj->size); | 2761 | (int) reloc->offset, (int) obj->size); |
2306 | drm_gem_object_unreference(target_obj); | 2762 | drm_gem_object_unreference(target_obj); |
2307 | i915_gem_object_unpin(obj); | 2763 | i915_gem_object_unpin(obj); |
2308 | return -EINVAL; | 2764 | return -EINVAL; |
2309 | } | 2765 | } |
2310 | if (reloc.offset & 3) { | 2766 | if (reloc->offset & 3) { |
2311 | DRM_ERROR("Relocation not 4-byte aligned: " | 2767 | DRM_ERROR("Relocation not 4-byte aligned: " |
2312 | "obj %p target %d offset %d.\n", | 2768 | "obj %p target %d offset %d.\n", |
2313 | obj, reloc.target_handle, | 2769 | obj, reloc->target_handle, |
2314 | (int) reloc.offset); | 2770 | (int) reloc->offset); |
2315 | drm_gem_object_unreference(target_obj); | 2771 | drm_gem_object_unreference(target_obj); |
2316 | i915_gem_object_unpin(obj); | 2772 | i915_gem_object_unpin(obj); |
2317 | return -EINVAL; | 2773 | return -EINVAL; |
2318 | } | 2774 | } |
2319 | 2775 | ||
2320 | if (reloc.write_domain & I915_GEM_DOMAIN_CPU || | 2776 | if (reloc->write_domain & I915_GEM_DOMAIN_CPU || |
2321 | reloc.read_domains & I915_GEM_DOMAIN_CPU) { | 2777 | reloc->read_domains & I915_GEM_DOMAIN_CPU) { |
2322 | DRM_ERROR("reloc with read/write CPU domains: " | 2778 | DRM_ERROR("reloc with read/write CPU domains: " |
2323 | "obj %p target %d offset %d " | 2779 | "obj %p target %d offset %d " |
2324 | "read %08x write %08x", | 2780 | "read %08x write %08x", |
2325 | obj, reloc.target_handle, | 2781 | obj, reloc->target_handle, |
2326 | (int) reloc.offset, | 2782 | (int) reloc->offset, |
2327 | reloc.read_domains, | 2783 | reloc->read_domains, |
2328 | reloc.write_domain); | 2784 | reloc->write_domain); |
2329 | drm_gem_object_unreference(target_obj); | 2785 | drm_gem_object_unreference(target_obj); |
2330 | i915_gem_object_unpin(obj); | 2786 | i915_gem_object_unpin(obj); |
2331 | return -EINVAL; | 2787 | return -EINVAL; |
2332 | } | 2788 | } |
2333 | 2789 | ||
2334 | if (reloc.write_domain && target_obj->pending_write_domain && | 2790 | if (reloc->write_domain && target_obj->pending_write_domain && |
2335 | reloc.write_domain != target_obj->pending_write_domain) { | 2791 | reloc->write_domain != target_obj->pending_write_domain) { |
2336 | DRM_ERROR("Write domain conflict: " | 2792 | DRM_ERROR("Write domain conflict: " |
2337 | "obj %p target %d offset %d " | 2793 | "obj %p target %d offset %d " |
2338 | "new %08x old %08x\n", | 2794 | "new %08x old %08x\n", |
2339 | obj, reloc.target_handle, | 2795 | obj, reloc->target_handle, |
2340 | (int) reloc.offset, | 2796 | (int) reloc->offset, |
2341 | reloc.write_domain, | 2797 | reloc->write_domain, |
2342 | target_obj->pending_write_domain); | 2798 | target_obj->pending_write_domain); |
2343 | drm_gem_object_unreference(target_obj); | 2799 | drm_gem_object_unreference(target_obj); |
2344 | i915_gem_object_unpin(obj); | 2800 | i915_gem_object_unpin(obj); |
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2351 | "presumed %08x delta %08x\n", | 2807 | "presumed %08x delta %08x\n", |
2352 | __func__, | 2808 | __func__, |
2353 | obj, | 2809 | obj, |
2354 | (int) reloc.offset, | 2810 | (int) reloc->offset, |
2355 | (int) reloc.target_handle, | 2811 | (int) reloc->target_handle, |
2356 | (int) reloc.read_domains, | 2812 | (int) reloc->read_domains, |
2357 | (int) reloc.write_domain, | 2813 | (int) reloc->write_domain, |
2358 | (int) target_obj_priv->gtt_offset, | 2814 | (int) target_obj_priv->gtt_offset, |
2359 | (int) reloc.presumed_offset, | 2815 | (int) reloc->presumed_offset, |
2360 | reloc.delta); | 2816 | reloc->delta); |
2361 | #endif | 2817 | #endif |
2362 | 2818 | ||
2363 | target_obj->pending_read_domains |= reloc.read_domains; | 2819 | target_obj->pending_read_domains |= reloc->read_domains; |
2364 | target_obj->pending_write_domain |= reloc.write_domain; | 2820 | target_obj->pending_write_domain |= reloc->write_domain; |
2365 | 2821 | ||
2366 | /* If the relocation already has the right value in it, no | 2822 | /* If the relocation already has the right value in it, no |
2367 | * more work needs to be done. | 2823 | * more work needs to be done. |
2368 | */ | 2824 | */ |
2369 | if (target_obj_priv->gtt_offset == reloc.presumed_offset) { | 2825 | if (target_obj_priv->gtt_offset == reloc->presumed_offset) { |
2370 | drm_gem_object_unreference(target_obj); | 2826 | drm_gem_object_unreference(target_obj); |
2371 | continue; | 2827 | continue; |
2372 | } | 2828 | } |
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2381 | /* Map the page containing the relocation we're going to | 2837 | /* Map the page containing the relocation we're going to |
2382 | * perform. | 2838 | * perform. |
2383 | */ | 2839 | */ |
2384 | reloc_offset = obj_priv->gtt_offset + reloc.offset; | 2840 | reloc_offset = obj_priv->gtt_offset + reloc->offset; |
2385 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, | 2841 | reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
2386 | (reloc_offset & | 2842 | (reloc_offset & |
2387 | ~(PAGE_SIZE - 1))); | 2843 | ~(PAGE_SIZE - 1))); |
2388 | reloc_entry = (uint32_t __iomem *)(reloc_page + | 2844 | reloc_entry = (uint32_t __iomem *)(reloc_page + |
2389 | (reloc_offset & (PAGE_SIZE - 1))); | 2845 | (reloc_offset & (PAGE_SIZE - 1))); |
2390 | reloc_val = target_obj_priv->gtt_offset + reloc.delta; | 2846 | reloc_val = target_obj_priv->gtt_offset + reloc->delta; |
2391 | 2847 | ||
2392 | #if WATCH_BUF | 2848 | #if WATCH_BUF |
2393 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | 2849 | DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", |
2394 | obj, (unsigned int) reloc.offset, | 2850 | obj, (unsigned int) reloc->offset, |
2395 | readl(reloc_entry), reloc_val); | 2851 | readl(reloc_entry), reloc_val); |
2396 | #endif | 2852 | #endif |
2397 | writel(reloc_val, reloc_entry); | 2853 | writel(reloc_val, reloc_entry); |
2398 | io_mapping_unmap_atomic(reloc_page); | 2854 | io_mapping_unmap_atomic(reloc_page); |
2399 | 2855 | ||
2400 | /* Write the updated presumed offset for this entry back out | 2856 | /* The updated presumed offset for this entry will be |
2401 | * to the user. | 2857 | * copied back out to the user. |
2402 | */ | 2858 | */ |
2403 | reloc.presumed_offset = target_obj_priv->gtt_offset; | 2859 | reloc->presumed_offset = target_obj_priv->gtt_offset; |
2404 | ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); | ||
2405 | if (ret != 0) { | ||
2406 | drm_gem_object_unreference(target_obj); | ||
2407 | i915_gem_object_unpin(obj); | ||
2408 | return ret; | ||
2409 | } | ||
2410 | 2860 | ||
2411 | drm_gem_object_unreference(target_obj); | 2861 | drm_gem_object_unreference(target_obj); |
2412 | } | 2862 | } |
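The relocation write itself goes through an atomic write-combining mapping of the aperture page that contains the entry; the idiom in isolation (hypothetical helper name):

	static void write_gtt_dword(struct io_mapping *mapping,
				    uint32_t gtt_addr, uint32_t value)
	{
		char __iomem *page;

		page = io_mapping_map_atomic_wc(mapping, gtt_addr & PAGE_MASK);
		writel(value, (uint32_t __iomem *)
		       (page + (gtt_addr & ~PAGE_MASK)));
		io_mapping_unmap_atomic(page);
	}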
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
2423 | static int | 2873 | static int |
2424 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 2874 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
2425 | struct drm_i915_gem_execbuffer *exec, | 2875 | struct drm_i915_gem_execbuffer *exec, |
2876 | struct drm_clip_rect *cliprects, | ||
2426 | uint64_t exec_offset) | 2877 | uint64_t exec_offset) |
2427 | { | 2878 | { |
2428 | drm_i915_private_t *dev_priv = dev->dev_private; | 2879 | drm_i915_private_t *dev_priv = dev->dev_private; |
2429 | struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) | ||
2430 | (uintptr_t) exec->cliprects_ptr; | ||
2431 | int nbox = exec->num_cliprects; | 2880 | int nbox = exec->num_cliprects; |
2432 | int i = 0, count; | 2881 | int i = 0, count; |
2433 | uint32_t exec_start, exec_len; | 2882 | uint32_t exec_start, exec_len; |
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, | |||
2448 | 2897 | ||
2449 | for (i = 0; i < count; i++) { | 2898 | for (i = 0; i < count; i++) { |
2450 | if (i < nbox) { | 2899 | if (i < nbox) { |
2451 | int ret = i915_emit_box(dev, boxes, i, | 2900 | int ret = i915_emit_box(dev, cliprects, i, |
2452 | exec->DR1, exec->DR4); | 2901 | exec->DR1, exec->DR4); |
2453 | if (ret) | 2902 | if (ret) |
2454 | return ret; | 2903 | return ret; |
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
2504 | return ret; | 2953 | return ret; |
2505 | } | 2954 | } |
2506 | 2955 | ||
2956 | static int | ||
2957 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | ||
2958 | uint32_t buffer_count, | ||
2959 | struct drm_i915_gem_relocation_entry **relocs) | ||
2960 | { | ||
2961 | uint32_t reloc_count = 0, reloc_index = 0, i; | ||
2962 | int ret; | ||
2963 | |||
2964 | *relocs = NULL; | ||
2965 | for (i = 0; i < buffer_count; i++) { | ||
2966 | if (reloc_count + exec_list[i].relocation_count < reloc_count) | ||
2967 | return -EINVAL; | ||
2968 | reloc_count += exec_list[i].relocation_count; | ||
2969 | } | ||
2970 | |||
2971 | *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); | ||
2972 | if (*relocs == NULL) | ||
2973 | return -ENOMEM; | ||
2974 | |||
2975 | for (i = 0; i < buffer_count; i++) { | ||
2976 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
2977 | |||
2978 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
2979 | |||
2980 | ret = copy_from_user(&(*relocs)[reloc_index], | ||
2981 | user_relocs, | ||
2982 | exec_list[i].relocation_count * | ||
2983 | sizeof(**relocs)); | ||
2984 | if (ret != 0) { | ||
2985 | drm_free(*relocs, reloc_count * sizeof(**relocs), | ||
2986 | DRM_MEM_DRIVER); | ||
2987 | *relocs = NULL; | ||
2988 | return -EFAULT; | ||
2989 | } | ||
2990 | |||
2991 | reloc_index += exec_list[i].relocation_count; | ||
2992 | } | ||
2993 | |||
2994 | return 0; | ||
2995 | } | ||
2996 | |||
2997 | static int | ||
2998 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | ||
2999 | uint32_t buffer_count, | ||
3000 | struct drm_i915_gem_relocation_entry *relocs) | ||
3001 | { | ||
3002 | uint32_t reloc_count = 0, i; | ||
3003 | int ret = 0; | ||
3004 | |||
3005 | for (i = 0; i < buffer_count; i++) { | ||
3006 | struct drm_i915_gem_relocation_entry __user *user_relocs; | ||
3007 | |||
3008 | user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; | ||
3009 | |||
3010 | if (ret == 0 && | ||
3011 | copy_to_user(user_relocs, | ||
3012 | &relocs[reloc_count], | ||
3013 | exec_list[i].relocation_count * | ||
3014 | sizeof(*relocs)) != 0) | ||
3015 | ret = -EFAULT; | ||
3016 | |||
3017 | reloc_count += exec_list[i].relocation_count; | ||
3018 | } | ||
3019 | |||
3020 | drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); | ||
3021 | |||
3022 | return ret; | ||
3023 | } | ||
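Together these helpers move every user-memory access for relocations outside the struct_mutex critical section: all entries are snapshotted into one kernel array up front and written back once at the end (the cliprects copy below applies the same pattern). The call shape, condensed:

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);
	/* pin and relocate against relocs[]; no user memory touched */
	mutex_unlock(&dev->struct_mutex);

	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret == 0)
		ret = ret2;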
3024 | |||
2507 | int | 3025 | int |
2508 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3026 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
2509 | struct drm_file *file_priv) | 3027 | struct drm_file *file_priv) |
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2515 | struct drm_gem_object **object_list = NULL; | 3033 | struct drm_gem_object **object_list = NULL; |
2516 | struct drm_gem_object *batch_obj; | 3034 | struct drm_gem_object *batch_obj; |
2517 | struct drm_i915_gem_object *obj_priv; | 3035 | struct drm_i915_gem_object *obj_priv; |
2518 | int ret, i, pinned = 0; | 3036 | struct drm_clip_rect *cliprects = NULL; |
3037 | struct drm_i915_gem_relocation_entry *relocs; | ||
3038 | int ret, ret2, i, pinned = 0; | ||
2519 | uint64_t exec_offset; | 3039 | uint64_t exec_offset; |
2520 | uint32_t seqno, flush_domains; | 3040 | uint32_t seqno, flush_domains, reloc_index; |
2521 | int pin_tries; | 3041 | int pin_tries; |
2522 | 3042 | ||
2523 | #if WATCH_EXEC | 3043 | #if WATCH_EXEC |
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2551 | goto pre_mutex_err; | 3071 | goto pre_mutex_err; |
2552 | } | 3072 | } |
2553 | 3073 | ||
3074 | if (args->num_cliprects != 0) { | ||
3075 | cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), | ||
3076 | DRM_MEM_DRIVER); | ||
3077 | if (cliprects == NULL) { | ||
3078 | ret = -ENOMEM; | ||
3079 | goto pre_mutex_err; | ||
3080 | } | ||
3079 | |||
3080 | ret = copy_from_user(cliprects, | ||
3081 | (struct drm_clip_rect __user *) | ||
3082 | (uintptr_t) args->cliprects_ptr, | ||
3083 | sizeof(*cliprects) * args->num_cliprects); | ||
3084 | if (ret != 0) { | ||
3085 | DRM_ERROR("copy %d cliprects failed: %d\n", | ||
3086 | args->num_cliprects, ret); | ||
3087 | ret = -EFAULT; | ||
3088 | goto pre_mutex_err; | ||
3089 | } | ||
3089 | } | ||
3090 | |||
3091 | ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, | ||
3092 | &relocs); | ||
3093 | if (ret != 0) | ||
3094 | goto pre_mutex_err; | ||
3095 | |||
2554 | mutex_lock(&dev->struct_mutex); | 3096 | mutex_lock(&dev->struct_mutex); |
2555 | 3097 | ||
2556 | i915_verify_inactive(dev, __FILE__, __LINE__); | 3098 | i915_verify_inactive(dev, __FILE__, __LINE__); |
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2593 | /* Pin and relocate */ | 3135 | /* Pin and relocate */ |
2594 | for (pin_tries = 0; ; pin_tries++) { | 3136 | for (pin_tries = 0; ; pin_tries++) { |
2595 | ret = 0; | 3137 | ret = 0; |
3138 | reloc_index = 0; | ||
3139 | |||
2596 | for (i = 0; i < args->buffer_count; i++) { | 3140 | for (i = 0; i < args->buffer_count; i++) { |
2597 | object_list[i]->pending_read_domains = 0; | 3141 | object_list[i]->pending_read_domains = 0; |
2598 | object_list[i]->pending_write_domain = 0; | 3142 | object_list[i]->pending_write_domain = 0; |
2599 | ret = i915_gem_object_pin_and_relocate(object_list[i], | 3143 | ret = i915_gem_object_pin_and_relocate(object_list[i], |
2600 | file_priv, | 3144 | file_priv, |
2601 | &exec_list[i]); | 3145 | &exec_list[i], |
3146 | &relocs[reloc_index]); | ||
2602 | if (ret) | 3147 | if (ret) |
2603 | break; | 3148 | break; |
2604 | pinned = i + 1; | 3149 | pinned = i + 1; |
3150 | reloc_index += exec_list[i].relocation_count; | ||
2605 | } | 3151 | } |
2606 | /* success */ | 3152 | /* success */ |
2607 | if (ret == 0) | 3153 | if (ret == 0) |
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2687 | #endif | 3233 | #endif |
2688 | 3234 | ||
2689 | /* Exec the batchbuffer */ | 3235 | /* Exec the batchbuffer */ |
2690 | ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); | 3236 | ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); |
2691 | if (ret) { | 3237 | if (ret) { |
2692 | DRM_ERROR("dispatch failed %d\n", ret); | 3238 | DRM_ERROR("dispatch failed %d\n", ret); |
2693 | goto err; | 3239 | goto err; |
@@ -2751,11 +3297,27 @@ err: | |||
2751 | args->buffer_count, ret); | 3297 | args->buffer_count, ret); |
2752 | } | 3298 | } |
2753 | 3299 | ||
3300 | /* Copy the updated relocations out regardless of current error | ||
3301 | * state. Failure to update the relocs would mean that the next | ||
3302 | * time userland calls execbuf, it would do so with presumed offset | ||
3303 | * state that didn't match the actual object state. | ||
3304 | */ | ||
3305 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3306 | relocs); | ||
3307 | if (ret2 != 0) { | ||
3308 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3309 | |||
3310 | if (ret == 0) | ||
3311 | ret = ret2; | ||
3312 | } | ||
3313 | |||
2754 | pre_mutex_err: | 3314 | pre_mutex_err: |
2755 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, | 3315 | drm_free(object_list, sizeof(*object_list) * args->buffer_count, |
2756 | DRM_MEM_DRIVER); | 3316 | DRM_MEM_DRIVER); |
2757 | drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, | 3317 | drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, |
2758 | DRM_MEM_DRIVER); | 3318 | DRM_MEM_DRIVER); |
3319 | drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, | ||
3320 | DRM_MEM_DRIVER); | ||
2759 | 3321 | ||
2760 | return ret; | 3322 | return ret; |
2761 | } | 3323 | } |
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev) | |||
3192 | 3754 | ||
3193 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; | 3755 | dev_priv->status_gfx_addr = obj_priv->gtt_offset; |
3194 | 3756 | ||
3195 | dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); | 3757 | dev_priv->hw_status_page = kmap(obj_priv->pages[0]); |
3196 | if (dev_priv->hw_status_page == NULL) { | 3758 | if (dev_priv->hw_status_page == NULL) { |
3197 | DRM_ERROR("Failed to map status page.\n"); | 3759 | DRM_ERROR("Failed to map status page.\n"); |
3198 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | 3760 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev) | |||
3222 | obj = dev_priv->hws_obj; | 3784 | obj = dev_priv->hws_obj; |
3223 | obj_priv = obj->driver_private; | 3785 | obj_priv = obj->driver_private; |
3224 | 3786 | ||
3225 | kunmap(obj_priv->page_list[0]); | 3787 | kunmap(obj_priv->pages[0]); |
3226 | i915_gem_object_unpin(obj); | 3788 | i915_gem_object_unpin(obj); |
3227 | drm_gem_object_unreference(obj); | 3789 | drm_gem_object_unreference(obj); |
3228 | dev_priv->hws_obj = NULL; | 3790 | dev_priv->hws_obj = NULL; |
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
3525 | if (!obj_priv->phys_obj) | 4087 | if (!obj_priv->phys_obj) |
3526 | return; | 4088 | return; |
3527 | 4089 | ||
3528 | ret = i915_gem_object_get_page_list(obj); | 4090 | ret = i915_gem_object_get_pages(obj); |
3529 | if (ret) | 4091 | if (ret) |
3530 | goto out; | 4092 | goto out; |
3531 | 4093 | ||
3532 | page_count = obj->size / PAGE_SIZE; | 4094 | page_count = obj->size / PAGE_SIZE; |
3533 | 4095 | ||
3534 | for (i = 0; i < page_count; i++) { | 4096 | for (i = 0; i < page_count; i++) { |
3535 | char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); | 4097 | char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); |
3536 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4098 | char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
3537 | 4099 | ||
3538 | memcpy(dst, src, PAGE_SIZE); | 4100 | memcpy(dst, src, PAGE_SIZE); |
3539 | kunmap_atomic(dst, KM_USER0); | 4101 | kunmap_atomic(dst, KM_USER0); |
3540 | } | 4102 | } |
3541 | drm_clflush_pages(obj_priv->page_list, page_count); | 4103 | drm_clflush_pages(obj_priv->pages, page_count); |
3542 | drm_agp_chipset_flush(dev); | 4104 | drm_agp_chipset_flush(dev); |
3543 | out: | 4105 | out: |
3544 | obj_priv->phys_obj->cur_obj = NULL; | 4106 | obj_priv->phys_obj->cur_obj = NULL; |
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
3581 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 4143 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
3582 | obj_priv->phys_obj->cur_obj = obj; | 4144 | obj_priv->phys_obj->cur_obj = obj; |
3583 | 4145 | ||
3584 | ret = i915_gem_object_get_page_list(obj); | 4146 | ret = i915_gem_object_get_pages(obj); |
3585 | if (ret) { | 4147 | if (ret) { |
3586 | DRM_ERROR("failed to get page list\n"); | 4148 | DRM_ERROR("failed to get page list\n"); |
3587 | goto out; | 4149 | goto out; |
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
3590 | page_count = obj->size / PAGE_SIZE; | 4152 | page_count = obj->size / PAGE_SIZE; |
3591 | 4153 | ||
3592 | for (i = 0; i < page_count; i++) { | 4154 | for (i = 0; i < page_count; i++) { |
3593 | char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); | 4155 | char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); |
3594 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); | 4156 | char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
3595 | 4157 | ||
3596 | memcpy(dst, src, PAGE_SIZE); | 4158 | memcpy(dst, src, PAGE_SIZE); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c new file mode 100644 index 000000000000..455ec970b385 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* | ||
2 | * Copyright Ā© 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * Keith Packard <keithp@keithp.com> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/seq_file.h> | ||
30 | #include "drmP.h" | ||
31 | #include "drm.h" | ||
32 | #include "i915_drm.h" | ||
33 | #include "i915_drv.h" | ||
34 | |||
35 | #define DRM_I915_RING_DEBUG 1 | ||
36 | |||
37 | |||
38 | #if defined(CONFIG_DEBUG_FS) | ||
39 | |||
40 | #define ACTIVE_LIST 1 | ||
41 | #define FLUSHING_LIST 2 | ||
42 | #define INACTIVE_LIST 3 | ||
43 | |||
44 | static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) | ||
45 | { | ||
46 | if (obj_priv->user_pin_count > 0) | ||
47 | return "P"; | ||
48 | else if (obj_priv->pin_count > 0) | ||
49 | return "p"; | ||
50 | else | ||
51 | return " "; | ||
52 | } | ||
53 | |||
54 | static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) | ||
55 | { | ||
56 | switch (obj_priv->tiling_mode) { | ||
57 | default: | ||
58 | case I915_TILING_NONE: return " "; | ||
59 | case I915_TILING_X: return "X"; | ||
60 | case I915_TILING_Y: return "Y"; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static int i915_gem_object_list_info(struct seq_file *m, void *data) | ||
65 | { | ||
66 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
67 | uintptr_t list = (uintptr_t) node->info_ent->data; | ||
68 | struct list_head *head; | ||
69 | struct drm_device *dev = node->minor->dev; | ||
70 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
71 | struct drm_i915_gem_object *obj_priv; | ||
72 | |||
73 | switch (list) { | ||
74 | case ACTIVE_LIST: | ||
75 | seq_printf(m, "Active:\n"); | ||
76 | head = &dev_priv->mm.active_list; | ||
77 | break; | ||
78 | case INACTIVE_LIST: | ||
79 | seq_printf(m, "Inctive:\n"); | ||
80 | head = &dev_priv->mm.inactive_list; | ||
81 | break; | ||
82 | case FLUSHING_LIST: | ||
83 | seq_printf(m, "Flushing:\n"); | ||
84 | head = &dev_priv->mm.flushing_list; | ||
85 | break; | ||
86 | default: | ||
87 | DRM_INFO("Ooops, unexpected list\n"); | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | list_for_each_entry(obj_priv, head, list) | ||
92 | { | ||
93 | struct drm_gem_object *obj = obj_priv->obj; | ||
94 | |||
95 | seq_printf(m, " %p: %s %08x %08x %d", | ||
96 | obj, | ||
97 | get_pin_flag(obj_priv), | ||
98 | obj->read_domains, obj->write_domain, | ||
99 | obj_priv->last_rendering_seqno); | ||
100 | |||
101 | if (obj->name) | ||
102 | seq_printf(m, " (name: %d)", obj->name); | ||
103 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
104 | seq_printf(m, " (fence: %d\n", obj_priv->fence_reg); | ||
105 | seq_printf(m, "\n"); | ||
106 | } | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int i915_gem_request_info(struct seq_file *m, void *data) | ||
111 | { | ||
112 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
113 | struct drm_device *dev = node->minor->dev; | ||
114 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
115 | struct drm_i915_gem_request *gem_request; | ||
116 | |||
117 | seq_printf(m, "Request:\n"); | ||
118 | list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) { | ||
119 | seq_printf(m, " %d @ %d\n", | ||
120 | gem_request->seqno, | ||
121 | (int) (jiffies - gem_request->emitted_jiffies)); | ||
122 | } | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static int i915_gem_seqno_info(struct seq_file *m, void *data) | ||
127 | { | ||
128 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
129 | struct drm_device *dev = node->minor->dev; | ||
130 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
131 | |||
132 | if (dev_priv->hw_status_page != NULL) { | ||
133 | seq_printf(m, "Current sequence: %d\n", | ||
134 | i915_get_gem_seqno(dev)); | ||
135 | } else { | ||
136 | seq_printf(m, "Current sequence: hws uninitialized\n"); | ||
137 | } | ||
138 | seq_printf(m, "Waiter sequence: %d\n", | ||
139 | dev_priv->mm.waiting_gem_seqno); | ||
140 | seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | |||
145 | static int i915_interrupt_info(struct seq_file *m, void *data) | ||
146 | { | ||
147 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
148 | struct drm_device *dev = node->minor->dev; | ||
149 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
150 | |||
151 | seq_printf(m, "Interrupt enable: %08x\n", | ||
152 | I915_READ(IER)); | ||
153 | seq_printf(m, "Interrupt identity: %08x\n", | ||
154 | I915_READ(IIR)); | ||
155 | seq_printf(m, "Interrupt mask: %08x\n", | ||
156 | I915_READ(IMR)); | ||
157 | seq_printf(m, "Pipe A stat: %08x\n", | ||
158 | I915_READ(PIPEASTAT)); | ||
159 | seq_printf(m, "Pipe B stat: %08x\n", | ||
160 | I915_READ(PIPEBSTAT)); | ||
161 | seq_printf(m, "Interrupts received: %d\n", | ||
162 | atomic_read(&dev_priv->irq_received)); | ||
163 | if (dev_priv->hw_status_page != NULL) { | ||
164 | seq_printf(m, "Current sequence: %d\n", | ||
165 | i915_get_gem_seqno(dev)); | ||
166 | } else { | ||
167 | seq_printf(m, "Current sequence: hws uninitialized\n"); | ||
168 | } | ||
169 | seq_printf(m, "Waiter sequence: %d\n", | ||
170 | dev_priv->mm.waiting_gem_seqno); | ||
171 | seq_printf(m, "IRQ sequence: %d\n", | ||
172 | dev_priv->mm.irq_gem_seqno); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static int i915_gem_fence_regs_info(struct seq_file *m, void *data) | ||
177 | { | ||
178 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
179 | struct drm_device *dev = node->minor->dev; | ||
180 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
181 | int i; | ||
182 | |||
183 | seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); | ||
184 | seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); | ||
185 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | ||
186 | struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; | ||
187 | |||
188 | if (obj == NULL) { | ||
189 | seq_printf(m, "Fenced object[%2d] = unused\n", i); | ||
190 | } else { | ||
191 | struct drm_i915_gem_object *obj_priv; | ||
192 | |||
193 | obj_priv = obj->driver_private; | ||
194 | seq_printf(m, "Fenced object[%2d] = %p: %s " | ||
195 | "%08x %08zx %08x %s %08x %08x %d", | ||
196 | i, obj, get_pin_flag(obj_priv), | ||
197 | obj_priv->gtt_offset, | ||
198 | obj->size, obj_priv->stride, | ||
199 | get_tiling_flag(obj_priv), | ||
200 | obj->read_domains, obj->write_domain, | ||
201 | obj_priv->last_rendering_seqno); | ||
202 | if (obj->name) | ||
203 | seq_printf(m, " (name: %d)", obj->name); | ||
204 | seq_printf(m, "\n"); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int i915_hws_info(struct seq_file *m, void *data) | ||
212 | { | ||
213 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
214 | struct drm_device *dev = node->minor->dev; | ||
215 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
216 | int i; | ||
217 | volatile u32 *hws; | ||
218 | |||
219 | hws = (volatile u32 *)dev_priv->hw_status_page; | ||
220 | if (hws == NULL) | ||
221 | return 0; | ||
222 | |||
223 | for (i = 0; i < 4096 / sizeof(u32); i += 4) { | ||
224 | seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
225 | i * 4, | ||
226 | hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); | ||
227 | } | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | static struct drm_info_list i915_gem_debugfs_list[] = { | ||
232 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | ||
233 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | ||
234 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | ||
235 | {"i915_gem_request", i915_gem_request_info, 0}, | ||
236 | {"i915_gem_seqno", i915_gem_seqno_info, 0}, | ||
237 | {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, | ||
238 | {"i915_gem_interrupt", i915_interrupt_info, 0}, | ||
239 | {"i915_gem_hws", i915_hws_info, 0}, | ||
240 | }; | ||
241 | #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) | ||
242 | |||
243 | int i915_gem_debugfs_init(struct drm_minor *minor) | ||
244 | { | ||
245 | return drm_debugfs_create_files(i915_gem_debugfs_list, | ||
246 | I915_GEM_DEBUGFS_ENTRIES, | ||
247 | minor->debugfs_root, minor); | ||
248 | } | ||
249 | |||
250 | void i915_gem_debugfs_cleanup(struct drm_minor *minor) | ||
251 | { | ||
252 | drm_debugfs_remove_files(i915_gem_debugfs_list, | ||
253 | I915_GEM_DEBUGFS_ENTRIES, minor); | ||
254 | } | ||
255 | |||
256 | #endif /* CONFIG_DEBUG_FS */ | ||
257 | |||
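Each entry in the debugfs table above pairs a file name with a seq_file show callback; drm_debugfs_create_files() then registers the whole list under the minor's debugfs root and drm_debugfs_remove_files() tears it down. A minimal sketch of one more read-only entry, reusing the same boilerplate as the callbacks above (i915_example_info and its output are hypothetical, not part of this patch):

static int i915_example_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	/* one seq_printf() per line of the virtual file */
	seq_printf(m, "example for device %p\n", dev);
	return 0;
}

/* ...and add {"i915_example", i915_example_info, 0} to i915_gem_debugfs_list. */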
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c deleted file mode 100644 index 4d1b9de0cd8b..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_proc.c +++ /dev/null | |||
@@ -1,334 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * Eric Anholt <eric@anholt.net> | ||
25 | * Keith Packard <keithp@keithp.com> | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm.h" | ||
31 | #include "i915_drm.h" | ||
32 | #include "i915_drv.h" | ||
33 | |||
34 | static int i915_gem_active_info(char *buf, char **start, off_t offset, | ||
35 | int request, int *eof, void *data) | ||
36 | { | ||
37 | struct drm_minor *minor = (struct drm_minor *) data; | ||
38 | struct drm_device *dev = minor->dev; | ||
39 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
40 | struct drm_i915_gem_object *obj_priv; | ||
41 | int len = 0; | ||
42 | |||
43 | if (offset > DRM_PROC_LIMIT) { | ||
44 | *eof = 1; | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | *start = &buf[offset]; | ||
49 | *eof = 0; | ||
50 | DRM_PROC_PRINT("Active:\n"); | ||
51 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, | ||
52 | list) | ||
53 | { | ||
54 | struct drm_gem_object *obj = obj_priv->obj; | ||
55 | if (obj->name) { | ||
56 | DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
57 | obj, obj->name, | ||
58 | obj->read_domains, obj->write_domain, | ||
59 | obj_priv->last_rendering_seqno); | ||
60 | } else { | ||
61 | DRM_PROC_PRINT(" %p: %08x %08x %d\n", | ||
62 | obj, | ||
63 | obj->read_domains, obj->write_domain, | ||
64 | obj_priv->last_rendering_seqno); | ||
65 | } | ||
66 | } | ||
67 | if (len > request + offset) | ||
68 | return request; | ||
69 | *eof = 1; | ||
70 | return len - offset; | ||
71 | } | ||
72 | |||
73 | static int i915_gem_flushing_info(char *buf, char **start, off_t offset, | ||
74 | int request, int *eof, void *data) | ||
75 | { | ||
76 | struct drm_minor *minor = (struct drm_minor *) data; | ||
77 | struct drm_device *dev = minor->dev; | ||
78 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
79 | struct drm_i915_gem_object *obj_priv; | ||
80 | int len = 0; | ||
81 | |||
82 | if (offset > DRM_PROC_LIMIT) { | ||
83 | *eof = 1; | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | *start = &buf[offset]; | ||
88 | *eof = 0; | ||
89 | DRM_PROC_PRINT("Flushing:\n"); | ||
90 | list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, | ||
91 | list) | ||
92 | { | ||
93 | struct drm_gem_object *obj = obj_priv->obj; | ||
94 | if (obj->name) { | ||
95 | DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
96 | obj, obj->name, | ||
97 | obj->read_domains, obj->write_domain, | ||
98 | obj_priv->last_rendering_seqno); | ||
99 | } else { | ||
100 | DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, | ||
101 | obj->read_domains, obj->write_domain, | ||
102 | obj_priv->last_rendering_seqno); | ||
103 | } | ||
104 | } | ||
105 | if (len > request + offset) | ||
106 | return request; | ||
107 | *eof = 1; | ||
108 | return len - offset; | ||
109 | } | ||
110 | |||
111 | static int i915_gem_inactive_info(char *buf, char **start, off_t offset, | ||
112 | int request, int *eof, void *data) | ||
113 | { | ||
114 | struct drm_minor *minor = (struct drm_minor *) data; | ||
115 | struct drm_device *dev = minor->dev; | ||
116 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
117 | struct drm_i915_gem_object *obj_priv; | ||
118 | int len = 0; | ||
119 | |||
120 | if (offset > DRM_PROC_LIMIT) { | ||
121 | *eof = 1; | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | *start = &buf[offset]; | ||
126 | *eof = 0; | ||
127 | DRM_PROC_PRINT("Inactive:\n"); | ||
128 | list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, | ||
129 | list) | ||
130 | { | ||
131 | struct drm_gem_object *obj = obj_priv->obj; | ||
132 | if (obj->name) { | ||
133 | DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
134 | obj, obj->name, | ||
135 | obj->read_domains, obj->write_domain, | ||
136 | obj_priv->last_rendering_seqno); | ||
137 | } else { | ||
138 | DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, | ||
139 | obj->read_domains, obj->write_domain, | ||
140 | obj_priv->last_rendering_seqno); | ||
141 | } | ||
142 | } | ||
143 | if (len > request + offset) | ||
144 | return request; | ||
145 | *eof = 1; | ||
146 | return len - offset; | ||
147 | } | ||
148 | |||
149 | static int i915_gem_request_info(char *buf, char **start, off_t offset, | ||
150 | int request, int *eof, void *data) | ||
151 | { | ||
152 | struct drm_minor *minor = (struct drm_minor *) data; | ||
153 | struct drm_device *dev = minor->dev; | ||
154 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
155 | struct drm_i915_gem_request *gem_request; | ||
156 | int len = 0; | ||
157 | |||
158 | if (offset > DRM_PROC_LIMIT) { | ||
159 | *eof = 1; | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | *start = &buf[offset]; | ||
164 | *eof = 0; | ||
165 | DRM_PROC_PRINT("Request:\n"); | ||
166 | list_for_each_entry(gem_request, &dev_priv->mm.request_list, | ||
167 | list) | ||
168 | { | ||
169 | DRM_PROC_PRINT(" %d @ %d\n", | ||
170 | gem_request->seqno, | ||
171 | (int) (jiffies - gem_request->emitted_jiffies)); | ||
172 | } | ||
173 | if (len > request + offset) | ||
174 | return request; | ||
175 | *eof = 1; | ||
176 | return len - offset; | ||
177 | } | ||
178 | |||
179 | static int i915_gem_seqno_info(char *buf, char **start, off_t offset, | ||
180 | int request, int *eof, void *data) | ||
181 | { | ||
182 | struct drm_minor *minor = (struct drm_minor *) data; | ||
183 | struct drm_device *dev = minor->dev; | ||
184 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
185 | int len = 0; | ||
186 | |||
187 | if (offset > DRM_PROC_LIMIT) { | ||
188 | *eof = 1; | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | *start = &buf[offset]; | ||
193 | *eof = 0; | ||
194 | if (dev_priv->hw_status_page != NULL) { | ||
195 | DRM_PROC_PRINT("Current sequence: %d\n", | ||
196 | i915_get_gem_seqno(dev)); | ||
197 | } else { | ||
198 | DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); | ||
199 | } | ||
200 | DRM_PROC_PRINT("Waiter sequence: %d\n", | ||
201 | dev_priv->mm.waiting_gem_seqno); | ||
202 | DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); | ||
203 | if (len > request + offset) | ||
204 | return request; | ||
205 | *eof = 1; | ||
206 | return len - offset; | ||
207 | } | ||
208 | |||
209 | |||
210 | static int i915_interrupt_info(char *buf, char **start, off_t offset, | ||
211 | int request, int *eof, void *data) | ||
212 | { | ||
213 | struct drm_minor *minor = (struct drm_minor *) data; | ||
214 | struct drm_device *dev = minor->dev; | ||
215 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
216 | int len = 0; | ||
217 | |||
218 | if (offset > DRM_PROC_LIMIT) { | ||
219 | *eof = 1; | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | *start = &buf[offset]; | ||
224 | *eof = 0; | ||
225 | DRM_PROC_PRINT("Interrupt enable: %08x\n", | ||
226 | I915_READ(IER)); | ||
227 | DRM_PROC_PRINT("Interrupt identity: %08x\n", | ||
228 | I915_READ(IIR)); | ||
229 | DRM_PROC_PRINT("Interrupt mask: %08x\n", | ||
230 | I915_READ(IMR)); | ||
231 | DRM_PROC_PRINT("Pipe A stat: %08x\n", | ||
232 | I915_READ(PIPEASTAT)); | ||
233 | DRM_PROC_PRINT("Pipe B stat: %08x\n", | ||
234 | I915_READ(PIPEBSTAT)); | ||
235 | DRM_PROC_PRINT("Interrupts received: %d\n", | ||
236 | atomic_read(&dev_priv->irq_received)); | ||
237 | if (dev_priv->hw_status_page != NULL) { | ||
238 | DRM_PROC_PRINT("Current sequence: %d\n", | ||
239 | i915_get_gem_seqno(dev)); | ||
240 | } else { | ||
241 | DRM_PROC_PRINT("Current sequence: hws uninitialized\n"); | ||
242 | } | ||
243 | DRM_PROC_PRINT("Waiter sequence: %d\n", | ||
244 | dev_priv->mm.waiting_gem_seqno); | ||
245 | DRM_PROC_PRINT("IRQ sequence: %d\n", | ||
246 | dev_priv->mm.irq_gem_seqno); | ||
247 | if (len > request + offset) | ||
248 | return request; | ||
249 | *eof = 1; | ||
250 | return len - offset; | ||
251 | } | ||
252 | |||
253 | static int i915_hws_info(char *buf, char **start, off_t offset, | ||
254 | int request, int *eof, void *data) | ||
255 | { | ||
256 | struct drm_minor *minor = (struct drm_minor *) data; | ||
257 | struct drm_device *dev = minor->dev; | ||
258 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
259 | int len = 0, i; | ||
260 | volatile u32 *hws; | ||
261 | |||
262 | if (offset > DRM_PROC_LIMIT) { | ||
263 | *eof = 1; | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | hws = (volatile u32 *)dev_priv->hw_status_page; | ||
268 | if (hws == NULL) { | ||
269 | *eof = 1; | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | *start = &buf[offset]; | ||
274 | *eof = 0; | ||
275 | for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { | ||
276 | DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", | ||
277 | i * 4, | ||
278 | hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); | ||
279 | } | ||
280 | if (len > request + offset) | ||
281 | return request; | ||
282 | *eof = 1; | ||
283 | return len - offset; | ||
284 | } | ||
285 | |||
286 | static struct drm_proc_list { | ||
287 | /** file name */ | ||
288 | const char *name; | ||
289 | /** proc callback*/ | ||
290 | int (*f) (char *, char **, off_t, int, int *, void *); | ||
291 | } i915_gem_proc_list[] = { | ||
292 | {"i915_gem_active", i915_gem_active_info}, | ||
293 | {"i915_gem_flushing", i915_gem_flushing_info}, | ||
294 | {"i915_gem_inactive", i915_gem_inactive_info}, | ||
295 | {"i915_gem_request", i915_gem_request_info}, | ||
296 | {"i915_gem_seqno", i915_gem_seqno_info}, | ||
297 | {"i915_gem_interrupt", i915_interrupt_info}, | ||
298 | {"i915_gem_hws", i915_hws_info}, | ||
299 | }; | ||
300 | |||
301 | #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) | ||
302 | |||
303 | int i915_gem_proc_init(struct drm_minor *minor) | ||
304 | { | ||
305 | struct proc_dir_entry *ent; | ||
306 | int i, j; | ||
307 | |||
308 | for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { | ||
309 | ent = create_proc_entry(i915_gem_proc_list[i].name, | ||
310 | S_IFREG | S_IRUGO, minor->dev_root); | ||
311 | if (!ent) { | ||
312 | DRM_ERROR("Cannot create /proc/dri/.../%s\n", | ||
313 | i915_gem_proc_list[i].name); | ||
314 | for (j = 0; j < i; j++) | ||
315 | remove_proc_entry(i915_gem_proc_list[i].name, | ||
316 | minor->dev_root); | ||
317 | return -1; | ||
318 | } | ||
319 | ent->read_proc = i915_gem_proc_list[i].f; | ||
320 | ent->data = minor; | ||
321 | } | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | void i915_gem_proc_cleanup(struct drm_minor *minor) | ||
326 | { | ||
327 | int i; | ||
328 | |||
329 | if (!minor->dev_root) | ||
330 | return; | ||
331 | |||
332 | for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) | ||
333 | remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); | ||
334 | } | ||
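For contrast with the seq_file callbacks that replace it, every handler in the file deleted above follows the legacy read_proc contract: format into buf, point *start at the slice beginning at offset, set *eof once the full output fits, and return the number of bytes made available. A condensed sketch of that pattern (illustrative only; this is the deprecated create_proc_entry()-era interface):

static int example_read_proc(char *buf, char **start, off_t offset,
			     int request, int *eof, void *data)
{
	int len = snprintf(buf, request, "example output\n");

	if (len <= offset) {		/* nothing left past the offset */
		*eof = 1;
		return 0;
	}
	*start = &buf[offset];		/* unread tail for this read() */
	if (len > request + offset)	/* more remains, keep *eof clear */
		return request;
	*eof = 1;
	return len - offset;
}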
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7fb4191ef934..4cce1aef438e 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
96 | */ | 96 | */ |
97 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | 97 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
98 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 98 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
99 | } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || | 99 | } else if (IS_MOBILE(dev)) { |
100 | IS_GM45(dev)) { | ||
101 | uint32_t dcc; | 100 | uint32_t dcc; |
102 | 101 | ||
103 | /* On 915-945 and GM965, channel interleave by the CPU is | 102 | /* On mobile 9xx chipsets, channel interleave by the CPU is |
104 | * determined by DCC. The CPU will alternate based on bit 6 | 103 | * determined by DCC. For single-channel, neither the CPU |
105 | * in interleaved mode, and the GPU will then also alternate | 104 | * nor the GPU do swizzling. For dual channel interleaved, |
106 | * on bit 6, 9, and 10 for X, but the CPU may also optionally | 105 | * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
107 | * alternate based on bit 17 (XOR not disabled and XOR | 106 | * 9 for Y tiled. The CPU's interleave is independent, and |
108 | * bit == 17). | 107 | * can be based on either bit 11 (haven't seen this yet) or |
108 | * bit 17 (common). | ||
109 | */ | 109 | */ |
110 | dcc = I915_READ(DCC); | 110 | dcc = I915_READ(DCC); |
111 | switch (dcc & DCC_ADDRESSING_MODE_MASK) { | 111 | switch (dcc & DCC_ADDRESSING_MODE_MASK) { |
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
115 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | 115 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
116 | break; | 116 | break; |
117 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: | 117 | case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: |
118 | if (IS_I915G(dev) || IS_I915GM(dev) || | 118 | if (dcc & DCC_CHANNEL_XOR_DISABLE) { |
119 | dcc & DCC_CHANNEL_XOR_DISABLE) { | 119 | /* This is the base swizzling by the GPU for |
120 | * tiled buffers. | ||
121 | */ | ||
120 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 122 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
121 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 123 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
122 | } else if ((IS_I965GM(dev) || IS_GM45(dev)) && | 124 | } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
123 | (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { | 125 | /* Bit 11 swizzling by the CPU in addition. */ |
124 | /* GM965/GM45 does either bit 11 or bit 17 | ||
125 | * swizzling. | ||
126 | */ | ||
127 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; | 126 | swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
128 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; | 127 | swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
129 | } else { | 128 | } else { |
130 | /* Bit 17 or perhaps other swizzling */ | 129 | /* Bit 17 swizzling by the CPU in addition. */ |
131 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 130 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
132 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 131 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
133 | } | 132 | } |
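The swizzle constants chosen above name which physical address bits the hardware XORs into bit 6 of a tiled byte offset, and software that touches tiled pages through the CPU must mirror the transform. A standalone sketch for the BIT_6_SWIZZLE_9_10 base case:

/* XOR address bits 9 and 10 into bit 6 of a linear byte offset (sketch). */
static unsigned long swizzle_bit_6_9_10(unsigned long offset)
{
	unsigned long bit9 = (offset >> 9) & 1;
	unsigned long bit10 = (offset >> 10) & 1;

	return offset ^ ((bit9 ^ bit10) << 6);
}

For BIT_6_SWIZZLE_9_10_11 bit 11 joins the XOR; the UNKNOWN values cover bit-17 CPU swizzling, since bit 17 of the physical address cannot be derived from a page-relative offset.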
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 90600d899413..377cc588f5e9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -359,6 +359,7 @@ | |||
359 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ | 359 | #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ |
360 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ | 360 | #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ |
361 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ | 361 | #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ |
362 | #define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ | ||
362 | 363 | ||
363 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) | 364 | #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) |
364 | #define I915_CRC_ERROR_ENABLE (1UL<<29) | 365 | #define I915_CRC_ERROR_ENABLE (1UL<<29) |
@@ -435,6 +436,7 @@ | |||
435 | */ | 436 | */ |
436 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 | 437 | #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 |
437 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 | 438 | #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 |
439 | #define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 | ||
438 | /* i830, required in DVO non-gang */ | 440 | /* i830, required in DVO non-gang */ |
439 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) | 441 | #define PLL_P2_DIVIDE_BY_4 (1 << 23) |
440 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ | 442 | #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ |
@@ -501,10 +503,12 @@ | |||
501 | #define FPB0 0x06048 | 503 | #define FPB0 0x06048 |
502 | #define FPB1 0x0604c | 504 | #define FPB1 0x0604c |
503 | #define FP_N_DIV_MASK 0x003f0000 | 505 | #define FP_N_DIV_MASK 0x003f0000 |
506 | #define FP_N_IGD_DIV_MASK 0x00ff0000 | ||
504 | #define FP_N_DIV_SHIFT 16 | 507 | #define FP_N_DIV_SHIFT 16 |
505 | #define FP_M1_DIV_MASK 0x00003f00 | 508 | #define FP_M1_DIV_MASK 0x00003f00 |
506 | #define FP_M1_DIV_SHIFT 8 | 509 | #define FP_M1_DIV_SHIFT 8 |
507 | #define FP_M2_DIV_MASK 0x0000003f | 510 | #define FP_M2_DIV_MASK 0x0000003f |
511 | #define FP_M2_IGD_DIV_MASK 0x000000ff | ||
508 | #define FP_M2_DIV_SHIFT 0 | 512 | #define FP_M2_DIV_SHIFT 0 |
509 | #define DPLL_TEST 0x606c | 513 | #define DPLL_TEST 0x606c |
510 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) | 514 | #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) |
@@ -629,6 +633,22 @@ | |||
629 | #define TV_HOTPLUG_INT_EN (1 << 18) | 633 | #define TV_HOTPLUG_INT_EN (1 << 18) |
630 | #define CRT_HOTPLUG_INT_EN (1 << 9) | 634 | #define CRT_HOTPLUG_INT_EN (1 << 9) |
631 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) | 635 | #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
636 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) | ||
637 | /* must use period 64 on GM45 according to docs */ | ||
638 | #define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) | ||
639 | #define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) | ||
640 | #define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) | ||
641 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) | ||
642 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) | ||
643 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) | ||
644 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) | ||
645 | #define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) | ||
646 | #define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) | ||
647 | #define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) | ||
648 | #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) | ||
649 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | ||
650 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
651 | |||
632 | 652 | ||
633 | #define PORT_HOTPLUG_STAT 0x61114 | 653 | #define PORT_HOTPLUG_STAT 0x61114 |
634 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 654 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
@@ -856,7 +876,7 @@ | |||
856 | */ | 876 | */ |
857 | # define TV_ENC_C0_FIX (1 << 10) | 877 | # define TV_ENC_C0_FIX (1 << 10) |
858 | /** Bits that must be preserved by software */ | 878 | /** Bits that must be preserved by software */ |
859 | # define TV_CTL_SAVE ((3 << 8) | (3 << 6)) | 879 | # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) |
860 | # define TV_FUSE_STATE_MASK (3 << 4) | 880 | # define TV_FUSE_STATE_MASK (3 << 4) |
861 | /** Read-only state that reports all features enabled */ | 881 | /** Read-only state that reports all features enabled */ |
862 | # define TV_FUSE_STATE_ENABLED (0 << 4) | 882 | # define TV_FUSE_STATE_ENABLED (0 << 4) |
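The new IGD masks widen the N and M2 fields relative to the i9xx ones, and on IGD the N field holds a one-hot ring-counter value rather than a binary count, so readers recover n with ffs(). A decode sketch matching the intel_crtc_clock_get() change later in this patch:

/* Sketch: extract IGD divisors from an FP register value. */
static void igd_decode_fp(u32 fp, int *n, int *m2)
{
	*n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
	*m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
}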
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 5ea715ace3a0..de621aad85b5 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -162,13 +162,13 @@ struct bdb_lvds_options { | |||
162 | u8 panel_type; | 162 | u8 panel_type; |
163 | u8 rsvd1; | 163 | u8 rsvd1; |
164 | /* LVDS capabilities, stored in a dword */ | 164 | /* LVDS capabilities, stored in a dword */ |
165 | u8 rsvd2:1; | ||
166 | u8 lvds_edid:1; | ||
167 | u8 pixel_dither:1; | ||
168 | u8 pfit_ratio_auto:1; | ||
169 | u8 pfit_gfx_mode_enhanced:1; | ||
170 | u8 pfit_text_mode_enhanced:1; | ||
171 | u8 pfit_mode:2; | 165 | u8 pfit_mode:2; |
166 | u8 pfit_text_mode_enhanced:1; | ||
167 | u8 pfit_gfx_mode_enhanced:1; | ||
168 | u8 pfit_ratio_auto:1; | ||
169 | u8 pixel_dither:1; | ||
170 | u8 lvds_edid:1; | ||
171 | u8 rsvd2:1; | ||
172 | u8 rsvd4; | 172 | u8 rsvd4; |
173 | } __attribute__((packed)); | 173 | } __attribute__((packed)); |
174 | 174 | ||
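The reorder above is a little-endian bitfield fix: GCC allocates bitfields starting at the least significant bit, so the member declared first lands in the low bits of the capabilities byte, and pfit_mode:2 must come first to map to bits 1:0 as the VBT lays them out. A userspace check of that layout (sketch):

#include <stdio.h>
#include <string.h>

struct caps {
	unsigned char pfit_mode:2;	/* should occupy bits 1:0 */
	unsigned char rest:6;
};

int main(void)
{
	struct caps c = { .pfit_mode = 3 };
	unsigned char raw;

	memcpy(&raw, &c, 1);
	printf("raw = 0x%02x\n", raw);	/* prints 0x03 with GCC on x86 */
	return 0;
}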
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index dcaed3466e83..2b6d44381c31 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) | |||
64 | static int intel_crt_mode_valid(struct drm_connector *connector, | 64 | static int intel_crt_mode_valid(struct drm_connector *connector, |
65 | struct drm_display_mode *mode) | 65 | struct drm_display_mode *mode) |
66 | { | 66 | { |
67 | struct drm_device *dev = connector->dev; | ||
68 | |||
69 | int max_clock = 0; | ||
67 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 70 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
68 | return MODE_NO_DBLESCAN; | 71 | return MODE_NO_DBLESCAN; |
69 | 72 | ||
70 | if (mode->clock > 400000 || mode->clock < 25000) | 73 | if (mode->clock < 25000) |
71 | return MODE_CLOCK_RANGE; | 74 | return MODE_CLOCK_LOW; |
75 | |||
76 | if (!IS_I9XX(dev)) | ||
77 | max_clock = 350000; | ||
78 | else | ||
79 | max_clock = 400000; | ||
80 | if (mode->clock > max_clock) | ||
81 | return MODE_CLOCK_HIGH; | ||
72 | 82 | ||
73 | return MODE_OK; | 83 | return MODE_OK; |
74 | } | 84 | } |
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
113 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 123 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
114 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 124 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
115 | 125 | ||
116 | if (intel_crtc->pipe == 0) | 126 | if (intel_crtc->pipe == 0) { |
117 | adpa |= ADPA_PIPE_A_SELECT; | 127 | adpa |= ADPA_PIPE_A_SELECT; |
118 | else | 128 | I915_WRITE(BCLRPAT_A, 0); |
129 | } else { | ||
119 | adpa |= ADPA_PIPE_B_SELECT; | 130 | adpa |= ADPA_PIPE_B_SELECT; |
131 | I915_WRITE(BCLRPAT_B, 0); | ||
132 | } | ||
120 | 133 | ||
121 | I915_WRITE(ADPA, adpa); | 134 | I915_WRITE(ADPA, adpa); |
122 | } | 135 | } |
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
133 | { | 146 | { |
134 | struct drm_device *dev = connector->dev; | 147 | struct drm_device *dev = connector->dev; |
135 | struct drm_i915_private *dev_priv = dev->dev_private; | 148 | struct drm_i915_private *dev_priv = dev->dev_private; |
136 | u32 temp; | 149 | u32 hotplug_en; |
137 | 150 | int i, tries = 0; | |
138 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | 151 | /* |
139 | 152 | * On 4 series desktop, the CRT detect sequence needs to be done twice | |
140 | temp = I915_READ(PORT_HOTPLUG_EN); | 153 | * to get a reliable result. |
141 | 154 | */ | |
142 | I915_WRITE(PORT_HOTPLUG_EN, | ||
143 | temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); | ||
144 | 155 | ||
145 | do { | 156 | if (IS_G4X(dev) && !IS_GM45(dev)) |
146 | if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) | 157 | tries = 2; |
147 | break; | 158 | else |
148 | msleep(1); | 159 | tries = 1; |
149 | } while (time_after(timeout, jiffies)); | 160 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
161 | hotplug_en &= ~(CRT_HOTPLUG_MASK); | ||
162 | hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; | ||
163 | |||
164 | if (IS_GM45(dev)) | ||
165 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | ||
166 | |||
167 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; | ||
168 | |||
169 | for (i = 0; i < tries ; i++) { | ||
170 | unsigned long timeout; | ||
171 | /* turn on the FORCE_DETECT */ | ||
172 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
173 | timeout = jiffies + msecs_to_jiffies(1000); | ||
174 | /* wait for FORCE_DETECT to go off */ | ||
175 | do { | ||
176 | if (!(I915_READ(PORT_HOTPLUG_EN) & | ||
177 | CRT_HOTPLUG_FORCE_DETECT)) | ||
178 | break; | ||
179 | msleep(1); | ||
180 | } while (time_after(timeout, jiffies)); | ||
181 | } | ||
150 | 182 | ||
151 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == | 183 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == |
152 | CRT_HOTPLUG_MONITOR_COLOR) | 184 | CRT_HOTPLUG_MONITOR_COLOR) |
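The new detect loop is the usual poll-until-clear idiom: program a self-clearing trigger bit, then sleep-poll until the hardware drops it or a deadline passes, repeating the sequence where the chipset needs it. Factored out as a sketch, with hypothetical i915_read()/i915_write() accessors standing in for I915_READ()/I915_WRITE():

/* Return true if FORCE_DETECT self-cleared within timeout_ms (sketch). */
static bool crt_force_detect_once(u32 hotplug_en, unsigned int timeout_ms)
{
	unsigned long timeout;

	i915_write(PORT_HOTPLUG_EN, hotplug_en | CRT_HOTPLUG_FORCE_DETECT);
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (!(i915_read(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
			return true;
		msleep(1);
	} while (time_after(timeout, jiffies));
	return false;
}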
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2834276cb38..d9c50ff94d76 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -56,11 +56,13 @@ typedef struct { | |||
56 | } intel_p2_t; | 56 | } intel_p2_t; |
57 | 57 | ||
58 | #define INTEL_P2_NUM 2 | 58 | #define INTEL_P2_NUM 2 |
59 | 59 | typedef struct intel_limit intel_limit_t; | |
60 | typedef struct { | 60 | struct intel_limit { |
61 | intel_range_t dot, vco, n, m, m1, m2, p, p1; | 61 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
62 | intel_p2_t p2; | 62 | intel_p2_t p2; |
63 | } intel_limit_t; | 63 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
64 | int, int, intel_clock_t *); | ||
65 | }; | ||
64 | 66 | ||
65 | #define I8XX_DOT_MIN 25000 | 67 | #define I8XX_DOT_MIN 25000 |
66 | #define I8XX_DOT_MAX 350000 | 68 | #define I8XX_DOT_MAX 350000 |
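Embedding find_pll in struct intel_limit makes the limits array a small strategy table: intel_limit() picks the row for the chipset and output type, and the caller invokes whatever search that row carries, as the new mode_set code later in this patch does:

	/* call-site pattern introduced by this patch (sketch) */
	const intel_limit_t *limit = intel_limit(crtc);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);

New chipset families then only add table rows instead of growing if-chains at every caller.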
@@ -90,18 +92,32 @@ typedef struct { | |||
90 | #define I9XX_DOT_MAX 400000 | 92 | #define I9XX_DOT_MAX 400000 |
91 | #define I9XX_VCO_MIN 1400000 | 93 | #define I9XX_VCO_MIN 1400000 |
92 | #define I9XX_VCO_MAX 2800000 | 94 | #define I9XX_VCO_MAX 2800000 |
95 | #define IGD_VCO_MIN 1700000 | ||
96 | #define IGD_VCO_MAX 3500000 | ||
93 | #define I9XX_N_MIN 1 | 97 | #define I9XX_N_MIN 1 |
94 | #define I9XX_N_MAX 6 | 98 | #define I9XX_N_MAX 6 |
99 | /* IGD's N counter is a ring counter */ | ||
100 | #define IGD_N_MIN 3 | ||
101 | #define IGD_N_MAX 6 | ||
95 | #define I9XX_M_MIN 70 | 102 | #define I9XX_M_MIN 70 |
96 | #define I9XX_M_MAX 120 | 103 | #define I9XX_M_MAX 120 |
104 | #define IGD_M_MIN 2 | ||
105 | #define IGD_M_MAX 256 | ||
97 | #define I9XX_M1_MIN 10 | 106 | #define I9XX_M1_MIN 10 |
98 | #define I9XX_M1_MAX 22 | 107 | #define I9XX_M1_MAX 22 |
99 | #define I9XX_M2_MIN 5 | 108 | #define I9XX_M2_MIN 5 |
100 | #define I9XX_M2_MAX 9 | 109 | #define I9XX_M2_MAX 9 |
110 | /* IGD M1 is reserved, and must be 0 */ | ||
111 | #define IGD_M1_MIN 0 | ||
112 | #define IGD_M1_MAX 0 | ||
113 | #define IGD_M2_MIN 0 | ||
114 | #define IGD_M2_MAX 254 | ||
101 | #define I9XX_P_SDVO_DAC_MIN 5 | 115 | #define I9XX_P_SDVO_DAC_MIN 5 |
102 | #define I9XX_P_SDVO_DAC_MAX 80 | 116 | #define I9XX_P_SDVO_DAC_MAX 80 |
103 | #define I9XX_P_LVDS_MIN 7 | 117 | #define I9XX_P_LVDS_MIN 7 |
104 | #define I9XX_P_LVDS_MAX 98 | 118 | #define I9XX_P_LVDS_MAX 98 |
119 | #define IGD_P_LVDS_MIN 7 | ||
120 | #define IGD_P_LVDS_MAX 112 | ||
105 | #define I9XX_P1_MIN 1 | 121 | #define I9XX_P1_MIN 1 |
106 | #define I9XX_P1_MAX 8 | 122 | #define I9XX_P1_MAX 8 |
107 | #define I9XX_P2_SDVO_DAC_SLOW 10 | 123 | #define I9XX_P2_SDVO_DAC_SLOW 10 |
@@ -115,6 +131,97 @@ typedef struct { | |||
115 | #define INTEL_LIMIT_I8XX_LVDS 1 | 131 | #define INTEL_LIMIT_I8XX_LVDS 1 |
116 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 | 132 | #define INTEL_LIMIT_I9XX_SDVO_DAC 2 |
117 | #define INTEL_LIMIT_I9XX_LVDS 3 | 133 | #define INTEL_LIMIT_I9XX_LVDS 3 |
134 | #define INTEL_LIMIT_G4X_SDVO 4 | ||
135 | #define INTEL_LIMIT_G4X_HDMI_DAC 5 | ||
136 | #define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 | ||
137 | #define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 | ||
138 | #define INTEL_LIMIT_IGD_SDVO_DAC 8 | ||
139 | #define INTEL_LIMIT_IGD_LVDS 9 | ||
140 | |||
141 | /* The parameters below are for SDVO on the G4X platform */ | ||
142 | #define G4X_DOT_SDVO_MIN 25000 | ||
143 | #define G4X_DOT_SDVO_MAX 270000 | ||
144 | #define G4X_VCO_MIN 1750000 | ||
145 | #define G4X_VCO_MAX 3500000 | ||
146 | #define G4X_N_SDVO_MIN 1 | ||
147 | #define G4X_N_SDVO_MAX 4 | ||
148 | #define G4X_M_SDVO_MIN 104 | ||
149 | #define G4X_M_SDVO_MAX 138 | ||
150 | #define G4X_M1_SDVO_MIN 17 | ||
151 | #define G4X_M1_SDVO_MAX 23 | ||
152 | #define G4X_M2_SDVO_MIN 5 | ||
153 | #define G4X_M2_SDVO_MAX 11 | ||
154 | #define G4X_P_SDVO_MIN 10 | ||
155 | #define G4X_P_SDVO_MAX 30 | ||
156 | #define G4X_P1_SDVO_MIN 1 | ||
157 | #define G4X_P1_SDVO_MAX 3 | ||
158 | #define G4X_P2_SDVO_SLOW 10 | ||
159 | #define G4X_P2_SDVO_FAST 10 | ||
160 | #define G4X_P2_SDVO_LIMIT 270000 | ||
161 | |||
162 | /* The parameters below are for HDMI/DAC on the G4X platform */ | ||
163 | #define G4X_DOT_HDMI_DAC_MIN 22000 | ||
164 | #define G4X_DOT_HDMI_DAC_MAX 400000 | ||
165 | #define G4X_N_HDMI_DAC_MIN 1 | ||
166 | #define G4X_N_HDMI_DAC_MAX 4 | ||
167 | #define G4X_M_HDMI_DAC_MIN 104 | ||
168 | #define G4X_M_HDMI_DAC_MAX 138 | ||
169 | #define G4X_M1_HDMI_DAC_MIN 16 | ||
170 | #define G4X_M1_HDMI_DAC_MAX 23 | ||
171 | #define G4X_M2_HDMI_DAC_MIN 5 | ||
172 | #define G4X_M2_HDMI_DAC_MAX 11 | ||
173 | #define G4X_P_HDMI_DAC_MIN 5 | ||
174 | #define G4X_P_HDMI_DAC_MAX 80 | ||
175 | #define G4X_P1_HDMI_DAC_MIN 1 | ||
176 | #define G4X_P1_HDMI_DAC_MAX 8 | ||
177 | #define G4X_P2_HDMI_DAC_SLOW 10 | ||
178 | #define G4X_P2_HDMI_DAC_FAST 5 | ||
179 | #define G4X_P2_HDMI_DAC_LIMIT 165000 | ||
180 | |||
181 | /* The parameters below are for single-channel LVDS on the G4X platform */ | ||
182 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000 | ||
183 | #define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000 | ||
184 | #define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1 | ||
185 | #define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3 | ||
186 | #define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104 | ||
187 | #define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138 | ||
188 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17 | ||
189 | #define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23 | ||
190 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5 | ||
191 | #define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11 | ||
192 | #define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28 | ||
193 | #define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112 | ||
194 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2 | ||
195 | #define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8 | ||
196 | #define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14 | ||
197 | #define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14 | ||
198 | #define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0 | ||
199 | |||
200 | /* The parameters below are for dual-channel LVDS on the G4X platform */ | ||
201 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000 | ||
202 | #define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000 | ||
203 | #define G4X_N_DUAL_CHANNEL_LVDS_MIN 1 | ||
204 | #define G4X_N_DUAL_CHANNEL_LVDS_MAX 3 | ||
205 | #define G4X_M_DUAL_CHANNEL_LVDS_MIN 104 | ||
206 | #define G4X_M_DUAL_CHANNEL_LVDS_MAX 138 | ||
207 | #define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17 | ||
208 | #define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23 | ||
209 | #define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5 | ||
210 | #define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11 | ||
211 | #define G4X_P_DUAL_CHANNEL_LVDS_MIN 14 | ||
212 | #define G4X_P_DUAL_CHANNEL_LVDS_MAX 42 | ||
213 | #define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2 | ||
214 | #define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6 | ||
215 | #define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7 | ||
216 | #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 | ||
217 | #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 | ||
218 | |||
219 | static bool | ||
220 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
221 | int target, int refclk, intel_clock_t *best_clock); | ||
222 | static bool | ||
223 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
224 | int target, int refclk, intel_clock_t *best_clock); | ||
118 | 225 | ||
119 | static const intel_limit_t intel_limits[] = { | 226 | static const intel_limit_t intel_limits[] = { |
120 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ | 227 | { /* INTEL_LIMIT_I8XX_DVO_DAC */ |
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = { | |||
128 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, | 235 | .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, |
129 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 236 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
130 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 237 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
238 | .find_pll = intel_find_best_PLL, | ||
131 | }, | 239 | }, |
132 | { /* INTEL_LIMIT_I8XX_LVDS */ | 240 | { /* INTEL_LIMIT_I8XX_LVDS */ |
133 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, | 241 | .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, |
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = { | |||
140 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, | 248 | .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, |
141 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 249 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
142 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 250 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
251 | .find_pll = intel_find_best_PLL, | ||
143 | }, | 252 | }, |
144 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ | 253 | { /* INTEL_LIMIT_I9XX_SDVO_DAC */ |
145 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 254 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = { | |||
152 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | 261 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, |
153 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 262 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
154 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 263 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
264 | .find_pll = intel_find_best_PLL, | ||
155 | }, | 265 | }, |
156 | { /* INTEL_LIMIT_I9XX_LVDS */ | 266 | { /* INTEL_LIMIT_I9XX_LVDS */ |
157 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | 267 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, |
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = { | |||
167 | */ | 277 | */ |
168 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 278 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
169 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 279 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
280 | .find_pll = intel_find_best_PLL, | ||
281 | }, | ||
282 | /* The parameters and functions below are for the G4X chipset family */ | ||
283 | { /* INTEL_LIMIT_G4X_SDVO */ | ||
284 | .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, | ||
285 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | ||
286 | .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, | ||
287 | .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX }, | ||
288 | .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX }, | ||
289 | .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX }, | ||
290 | .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX }, | ||
291 | .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX}, | ||
292 | .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT, | ||
293 | .p2_slow = G4X_P2_SDVO_SLOW, | ||
294 | .p2_fast = G4X_P2_SDVO_FAST | ||
295 | }, | ||
296 | .find_pll = intel_g4x_find_best_PLL, | ||
297 | }, | ||
298 | { /* INTEL_LIMIT_G4X_HDMI_DAC */ | ||
299 | .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, | ||
300 | .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, | ||
301 | .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, | ||
302 | .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX }, | ||
303 | .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX }, | ||
304 | .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX }, | ||
305 | .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX }, | ||
306 | .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX}, | ||
307 | .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT, | ||
308 | .p2_slow = G4X_P2_HDMI_DAC_SLOW, | ||
309 | .p2_fast = G4X_P2_HDMI_DAC_FAST | ||
310 | }, | ||
311 | .find_pll = intel_g4x_find_best_PLL, | ||
312 | }, | ||
313 | { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ | ||
314 | .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, | ||
315 | .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, | ||
316 | .vco = { .min = G4X_VCO_MIN, | ||
317 | .max = G4X_VCO_MAX }, | ||
318 | .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN, | ||
319 | .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX }, | ||
320 | .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN, | ||
321 | .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX }, | ||
322 | .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN, | ||
323 | .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX }, | ||
324 | .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN, | ||
325 | .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX }, | ||
326 | .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN, | ||
327 | .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX }, | ||
328 | .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN, | ||
329 | .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX }, | ||
330 | .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT, | ||
331 | .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW, | ||
332 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | ||
333 | }, | ||
334 | .find_pll = intel_g4x_find_best_PLL, | ||
335 | }, | ||
336 | { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ | ||
337 | .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, | ||
338 | .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, | ||
339 | .vco = { .min = G4X_VCO_MIN, | ||
340 | .max = G4X_VCO_MAX }, | ||
341 | .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN, | ||
342 | .max = G4X_N_DUAL_CHANNEL_LVDS_MAX }, | ||
343 | .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN, | ||
344 | .max = G4X_M_DUAL_CHANNEL_LVDS_MAX }, | ||
345 | .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN, | ||
346 | .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX }, | ||
347 | .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN, | ||
348 | .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX }, | ||
349 | .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN, | ||
350 | .max = G4X_P_DUAL_CHANNEL_LVDS_MAX }, | ||
351 | .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN, | ||
352 | .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX }, | ||
353 | .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT, | ||
354 | .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW, | ||
355 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | ||
356 | }, | ||
357 | .find_pll = intel_g4x_find_best_PLL, | ||
358 | }, | ||
359 | { /* INTEL_LIMIT_IGD_SDVO_DAC */ | ||
360 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, | ||
361 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | ||
362 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | ||
363 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | ||
364 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | ||
365 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | ||
366 | .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, | ||
367 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
368 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | ||
369 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | ||
170 | }, | 370 | }, |
371 | { /* INTEL_LIMIT_IGD_LVDS */ | ||
372 | .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, | ||
373 | .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, | ||
374 | .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, | ||
375 | .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, | ||
376 | .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, | ||
377 | .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, | ||
378 | .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, | ||
379 | .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, | ||
380 | /* IGD only supports single-channel mode. */ | ||
381 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | ||
382 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | ||
383 | }, | ||
384 | |||
171 | }; | 385 | }; |
172 | 386 | ||
387 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) | ||
388 | { | ||
389 | struct drm_device *dev = crtc->dev; | ||
390 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
391 | const intel_limit_t *limit; | ||
392 | |||
393 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
394 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | ||
395 | LVDS_CLKB_POWER_UP) | ||
396 | /* LVDS with dual channel */ | ||
397 | limit = &intel_limits | ||
398 | [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; | ||
399 | else | ||
400 | /* LVDS with single channel */ | ||
401 | limit = &intel_limits | ||
402 | [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; | ||
403 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || | ||
404 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | ||
405 | limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; | ||
406 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { | ||
407 | limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; | ||
408 | } else /* default limits for other output types */ | ||
409 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | ||
410 | |||
411 | return limit; | ||
412 | } | ||
413 | |||
173 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | 414 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc) |
174 | { | 415 | { |
175 | struct drm_device *dev = crtc->dev; | 416 | struct drm_device *dev = crtc->dev; |
176 | const intel_limit_t *limit; | 417 | const intel_limit_t *limit; |
177 | 418 | ||
178 | if (IS_I9XX(dev)) { | 419 | if (IS_G4X(dev)) { |
420 | limit = intel_g4x_limit(crtc); | ||
421 | } else if (IS_I9XX(dev) && !IS_IGD(dev)) { | ||
179 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 422 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
180 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; | 423 | limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; |
181 | else | 424 | else |
182 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; | 425 | limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; |
426 | } else if (IS_IGD(dev)) { | ||
427 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | ||
428 | limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; | ||
429 | else | ||
430 | limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; | ||
183 | } else { | 431 | } else { |
184 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 432 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
185 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; | 433 | limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; |
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) | |||
189 | return limit; | 437 | return limit; |
190 | } | 438 | } |
191 | 439 | ||
192 | static void intel_clock(int refclk, intel_clock_t *clock) | 440 | /* m1 is reserved as 0 in IGD, n is a ring counter */ |
441 | static void igd_clock(int refclk, intel_clock_t *clock) | ||
193 | { | 442 | { |
443 | clock->m = clock->m2 + 2; | ||
444 | clock->p = clock->p1 * clock->p2; | ||
445 | clock->vco = refclk * clock->m / clock->n; | ||
446 | clock->dot = clock->vco / clock->p; | ||
447 | } | ||
448 | |||
449 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) | ||
450 | { | ||
451 | if (IS_IGD(dev)) { | ||
452 | igd_clock(refclk, clock); | ||
453 | return; | ||
454 | } | ||
194 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); | 455 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
195 | clock->p = clock->p1 * clock->p2; | 456 | clock->p = clock->p1 * clock->p2; |
196 | clock->vco = refclk * clock->m / (clock->n + 2); | 457 | clock->vco = refclk * clock->m / (clock->n + 2); |
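Worked numbers make the IGD path concrete. Taking refclk = 96000 kHz with n = 3, m2 = 100, p1 = 2, p2 = 10 (illustrative values inside the IGD limits defined earlier):

	m   = m2 + 2          = 102
	vco = refclk * m / n  = 96000 * 102 / 3 = 3264000 kHz
	dot = vco / (p1 * p2) = 3264000 / 20 = 163200 kHz

The i9xx path instead uses m = 5 * (m1 + 2) + (m2 + 2) and divides by n + 2 rather than n, which is why the shared intel_clock() now needs the device to dispatch on.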
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) | |||
226 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | 487 | static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) |
227 | { | 488 | { |
228 | const intel_limit_t *limit = intel_limit (crtc); | 489 | const intel_limit_t *limit = intel_limit (crtc); |
490 | struct drm_device *dev = crtc->dev; | ||
229 | 491 | ||
230 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 492 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
231 | INTELPllInvalid ("p1 out of range\n"); | 493 | INTELPllInvalid ("p1 out of range\n"); |
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |||
235 | INTELPllInvalid ("m2 out of range\n"); | 497 | INTELPllInvalid ("m2 out of range\n"); |
236 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 498 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
237 | INTELPllInvalid ("m1 out of range\n"); | 499 | INTELPllInvalid ("m1 out of range\n"); |
238 | if (clock->m1 <= clock->m2) | 500 | if (clock->m1 <= clock->m2 && !IS_IGD(dev)) |
239 | INTELPllInvalid ("m1 <= m2\n"); | 501 | INTELPllInvalid ("m1 <= m2\n"); |
240 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 502 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
241 | INTELPllInvalid ("m out of range\n"); | 503 | INTELPllInvalid ("m out of range\n"); |
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) | |||
252 | return true; | 514 | return true; |
253 | } | 515 | } |
254 | 516 | ||
255 | /** | 517 | static bool |
256 | * Returns a set of divisors for the desired target clock with the given | 518 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
257 | * refclk, or FALSE. The returned values represent the clock equation: | 519 | int target, int refclk, intel_clock_t *best_clock) |
258 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | 520 | |
259 | */ | ||
260 | static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | ||
261 | int refclk, intel_clock_t *best_clock) | ||
262 | { | 521 | { |
263 | struct drm_device *dev = crtc->dev; | 522 | struct drm_device *dev = crtc->dev; |
264 | struct drm_i915_private *dev_priv = dev->dev_private; | 523 | struct drm_i915_private *dev_priv = dev->dev_private; |
265 | intel_clock_t clock; | 524 | intel_clock_t clock; |
266 | const intel_limit_t *limit = intel_limit(crtc); | ||
267 | int err = target; | 525 | int err = target; |
268 | 526 | ||
269 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && | 527 | if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
289 | memset (best_clock, 0, sizeof (*best_clock)); | 547 | memset (best_clock, 0, sizeof (*best_clock)); |
290 | 548 | ||
291 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | 549 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { |
292 | for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && | 550 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { |
293 | clock.m2 <= limit->m2.max; clock.m2++) { | 551 | /* m1 is always 0 in IGD */ |
552 | if (clock.m2 >= clock.m1 && !IS_IGD(dev)) | ||
553 | break; | ||
294 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | 554 | for (clock.n = limit->n.min; clock.n <= limit->n.max; |
295 | clock.n++) { | 555 | clock.n++) { |
296 | for (clock.p1 = limit->p1.min; | 556 | for (clock.p1 = limit->p1.min; |
297 | clock.p1 <= limit->p1.max; clock.p1++) { | 557 | clock.p1 <= limit->p1.max; clock.p1++) { |
298 | int this_err; | 558 | int this_err; |
299 | 559 | ||
300 | intel_clock(refclk, &clock); | 560 | intel_clock(dev, refclk, &clock); |
301 | 561 | ||
302 | if (!intel_PLL_is_valid(crtc, &clock)) | 562 | if (!intel_PLL_is_valid(crtc, &clock)) |
303 | continue; | 563 | continue; |
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, | |||
315 | return (err != target); | 575 | return (err != target); |
316 | } | 576 | } |
317 | 577 | ||
578 | static bool | ||
579 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
580 | int target, int refclk, intel_clock_t *best_clock) | ||
581 | { | ||
582 | struct drm_device *dev = crtc->dev; | ||
583 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
584 | intel_clock_t clock; | ||
585 | int max_n; | ||
586 | bool found; | ||
587 | /* approximately equals target * 0.00488 */ | ||
588 | int err_most = (target >> 8) + (target >> 10); | ||
589 | found = false; | ||
590 | |||
591 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
592 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | ||
593 | LVDS_CLKB_POWER_UP) | ||
594 | clock.p2 = limit->p2.p2_fast; | ||
595 | else | ||
596 | clock.p2 = limit->p2.p2_slow; | ||
597 | } else { | ||
598 | if (target < limit->p2.dot_limit) | ||
599 | clock.p2 = limit->p2.p2_slow; | ||
600 | else | ||
601 | clock.p2 = limit->p2.p2_fast; | ||
602 | } | ||
603 | |||
604 | memset(best_clock, 0, sizeof(*best_clock)); | ||
605 | max_n = limit->n.max; | ||
606 | /* based on hardware requirement, prefer smaller n for precision */ | ||
607 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { | ||
608 | /* based on hardware requirement, prefer larger m1, m2, p1 */ | ||
609 | for (clock.m1 = limit->m1.max; | ||
610 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
611 | for (clock.m2 = limit->m2.max; | ||
612 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
613 | for (clock.p1 = limit->p1.max; | ||
614 | clock.p1 >= limit->p1.min; clock.p1--) { | ||
615 | int this_err; | ||
616 | |||
617 | intel_clock(dev, refclk, &clock); | ||
618 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
619 | continue; | ||
620 | this_err = abs(clock.dot - target); | ||
621 | if (this_err < err_most) { | ||
622 | *best_clock = clock; | ||
623 | err_most = this_err; | ||
624 | max_n = clock.n; | ||
625 | found = true; | ||
626 | } | ||
627 | } | ||
628 | } | ||
629 | } | ||
630 | } | ||
631 | |||
632 | return found; | ||
633 | } | ||
634 | |||
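The bound err_most = (target >> 8) + (target >> 10) is shift arithmetic for a relative tolerance of 1/256 + 1/1024 = 5/1024, about 0.49% of the target. For a 165000 kHz target, for instance:

	(165000 >> 8) + (165000 >> 10) = 644 + 161 = 805 kHz

Each acceptable candidate both tightens err_most and sets max_n = clock.n, which stops the outer loop from trying any larger n, implementing the smaller-n preference the comments call for.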
318 | void | 635 | void |
319 | intel_wait_for_vblank(struct drm_device *dev) | 636 | intel_wait_for_vblank(struct drm_device *dev) |
320 | { | 637 | { |
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev) | |||
634 | return 400000; | 951 | return 400000; |
635 | else if (IS_I915G(dev)) | 952 | else if (IS_I915G(dev)) |
636 | return 333000; | 953 | return 333000; |
637 | else if (IS_I945GM(dev) || IS_845G(dev)) | 954 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) |
638 | return 200000; | 955 | return 200000; |
639 | else if (IS_I915GM(dev)) { | 956 | else if (IS_I915GM(dev)) { |
640 | u16 gcfgc = 0; | 957 | u16 gcfgc = 0; |
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
733 | bool is_crt = false, is_lvds = false, is_tv = false; | 1050 | bool is_crt = false, is_lvds = false, is_tv = false; |
734 | struct drm_mode_config *mode_config = &dev->mode_config; | 1051 | struct drm_mode_config *mode_config = &dev->mode_config; |
735 | struct drm_connector *connector; | 1052 | struct drm_connector *connector; |
1053 | const intel_limit_t *limit; | ||
736 | int ret; | 1054 | int ret; |
737 | 1055 | ||
738 | drm_vblank_pre_modeset(dev, pipe); | 1056 | drm_vblank_pre_modeset(dev, pipe); |
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
776 | refclk = 48000; | 1094 | refclk = 48000; |
777 | } | 1095 | } |
778 | 1096 | ||
779 | ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); | 1097 | /* |
1098 | * Returns a set of divisors for the desired target clock with the given | ||
1099 | * refclk, or FALSE. The returned values represent the clock equation: | ||
1100 | * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | ||
1101 | */ | ||
1102 | limit = intel_limit(crtc); | ||
1103 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); | ||
780 | if (!ok) { | 1104 | if (!ok) { |
781 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | 1105 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
782 | return -EINVAL; | 1106 | return -EINVAL; |
783 | } | 1107 | } |
784 | 1108 | ||
785 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 1109 | if (IS_IGD(dev)) |
1110 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; | ||
1111 | else | ||
1112 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | ||
786 | 1113 | ||
787 | dpll = DPLL_VGA_MODE_DIS; | 1114 | dpll = DPLL_VGA_MODE_DIS; |
788 | if (IS_I9XX(dev)) { | 1115 | if (IS_I9XX(dev)) { |
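The IGD branch stores N one-hot, matching the ring-counter decode added to intel_crtc_clock_get() later in this patch: encode with (1 << n), decode with ffs() - 1. A round-trip sketch:

	u32 field = 1 << clock.n;	/* n = 3 encodes as 0x08 */
	int n_back = ffs(field) - 1;	/* 0x08 decodes back to 3 */

The i9xx branch keeps the plain binary value, which is also why the IGD variant needs the wider FP_N_IGD_DIV_MASK defined in i915_reg.h above.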
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
799 | } | 1126 | } |
800 | 1127 | ||
801 | /* compute bitmask from p1 value */ | 1128 | /* compute bitmask from p1 value */ |
802 | dpll |= (1 << (clock.p1 - 1)) << 16; | 1129 | if (IS_IGD(dev)) |
1130 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; | ||
1131 | else | ||
1132 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | ||
803 | switch (clock.p2) { | 1133 | switch (clock.p2) { |
804 | case 5: | 1134 | case 5: |
805 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | 1135 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1279 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); | 1609 | fp = I915_READ((pipe == 0) ? FPA1 : FPB1); |
1280 | 1610 | ||
1281 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | 1611 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
1282 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | 1612 | if (IS_IGD(dev)) { |
1283 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | 1613 | clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; |
1614 | clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; | ||
1615 | } else { | ||
1616 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | ||
1617 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | ||
1618 | } | ||
1619 | |||
1284 | if (IS_I9XX(dev)) { | 1620 | if (IS_I9XX(dev)) { |
1285 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | 1621 | if (IS_IGD(dev)) |
1622 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> | ||
1623 | DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); | ||
1624 | else | ||
1625 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | ||
1286 | DPLL_FPA01_P1_POST_DIV_SHIFT); | 1626 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
1287 | 1627 | ||
1288 | switch (dpll & DPLL_MODE_MASK) { | 1628 | switch (dpll & DPLL_MODE_MASK) { |
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1301 | } | 1641 | } |
1302 | 1642 | ||
1303 | /* XXX: Handle the 100Mhz refclk */ | 1643 | /* XXX: Handle the 100Mhz refclk */ |
1304 | intel_clock(96000, &clock); | 1644 | intel_clock(dev, 96000, &clock); |
1305 | } else { | 1645 | } else { |
1306 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | 1646 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
1307 | 1647 | ||
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1313 | if ((dpll & PLL_REF_INPUT_MASK) == | 1653 | if ((dpll & PLL_REF_INPUT_MASK) == |
1314 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | 1654 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
1315 | /* XXX: might not be 66MHz */ | 1655 | /* XXX: might not be 66MHz */ |
1316 | intel_clock(66000, &clock); | 1656 | intel_clock(dev, 66000, &clock); |
1317 | } else | 1657 | } else |
1318 | intel_clock(48000, &clock); | 1658 | intel_clock(dev, 48000, &clock); |
1319 | } else { | 1659 | } else { |
1320 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | 1660 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
1321 | clock.p1 = 2; | 1661 | clock.p1 = 2; |
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |||
1328 | else | 1668 | else |
1329 | clock.p2 = 2; | 1669 | clock.p2 = 2; |
1330 | 1670 | ||
1331 | intel_clock(48000, &clock); | 1671 | intel_clock(dev, 48000, &clock); |
1332 | } | 1672 | } |
1333 | } | 1673 | } |
1334 | 1674 | ||
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
1474 | 1814 | ||
1475 | if (IS_I9XX(dev)) { | 1815 | if (IS_I9XX(dev)) { |
1476 | int found; | 1816 | int found; |
1817 | u32 reg; | ||
1477 | 1818 | ||
1478 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 1819 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
1479 | found = intel_sdvo_init(dev, SDVOB); | 1820 | found = intel_sdvo_init(dev, SDVOB); |
1480 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1821 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1481 | intel_hdmi_init(dev, SDVOB); | 1822 | intel_hdmi_init(dev, SDVOB); |
1482 | } | 1823 | } |
1483 | if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { | 1824 | |
1825 | /* Before G4X, SDVOC doesn't have its own detect register */ ||
1826 | if (IS_G4X(dev)) | ||
1827 | reg = SDVOC; | ||
1828 | else | ||
1829 | reg = SDVOB; | ||
1830 | |||
1831 | if (I915_READ(reg) & SDVO_DETECTED) { | ||
1484 | found = intel_sdvo_init(dev, SDVOC); | 1832 | found = intel_sdvo_init(dev, SDVOC); |
1485 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 1833 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1486 | intel_hdmi_init(dev, SDVOC); | 1834 | intel_hdmi_init(dev, SDVOC); |
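The SDVOC hunk above also corrects which register gets probed: the old condition ran the SDVOC probe unconditionally on pre-G4X hardware and, on G4X, tested SDVOB's detect bit rather than SDVOC's own. Condensed, the new logic is equivalent to this sketch (same helpers as in the hunk):

    /* Pre-G4X parts have no detect bit for SDVOC, so fall back to
     * SDVOB's strap; G4X can read SDVOC directly. */
    reg = IS_G4X(dev) ? SDVOC : SDVOB;
    if (I915_READ(reg) & SDVO_DETECTED) {
            if (!intel_sdvo_init(dev, SDVOC) && SUPPORTS_INTEGRATED_HDMI(dev))
                    intel_hdmi_init(dev, SDVOC);
    }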
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0d211af98854..6619f26e46a5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
265 | pfit_control = 0; | 265 | pfit_control = 0; |
266 | 266 | ||
267 | if (!IS_I965G(dev)) { | 267 | if (!IS_I965G(dev)) { |
268 | if (dev_priv->panel_wants_dither) | 268 | if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) |
269 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; | 269 | pfit_control |= PANEL_8TO6_DITHER_ENABLE; |
270 | } | 270 | } |
271 | else | 271 | else |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 56485d67369b..ceca9471a75a 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -217,8 +217,8 @@ static const u32 filter_table[] = { | |||
217 | */ | 217 | */ |
218 | static const struct color_conversion ntsc_m_csc_composite = { | 218 | static const struct color_conversion ntsc_m_csc_composite = { |
219 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 219 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
220 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 220 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
221 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 221 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
222 | }; | 222 | }; |
223 | 223 | ||
224 | static const struct video_levels ntsc_m_levels_composite = { | 224 | static const struct video_levels ntsc_m_levels_composite = { |
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = { | |||
226 | }; | 226 | }; |
227 | 227 | ||
228 | static const struct color_conversion ntsc_m_csc_svideo = { | 228 | static const struct color_conversion ntsc_m_csc_svideo = { |
229 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 229 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
230 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 230 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
231 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 231 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | static const struct video_levels ntsc_m_levels_svideo = { | 234 | static const struct video_levels ntsc_m_levels_svideo = { |
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = { | |||
237 | 237 | ||
238 | static const struct color_conversion ntsc_j_csc_composite = { | 238 | static const struct color_conversion ntsc_j_csc_composite = { |
239 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, | 239 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, |
240 | .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, | 240 | .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200, |
241 | .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, | 241 | .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200, |
242 | }; | 242 | }; |
243 | 243 | ||
244 | static const struct video_levels ntsc_j_levels_composite = { | 244 | static const struct video_levels ntsc_j_levels_composite = { |
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = { | |||
247 | 247 | ||
248 | static const struct color_conversion ntsc_j_csc_svideo = { | 248 | static const struct color_conversion ntsc_j_csc_svideo = { |
249 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, | 249 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, |
250 | .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, | 250 | .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200, |
251 | .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, | 251 | .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200, |
252 | }; | 252 | }; |
253 | 253 | ||
254 | static const struct video_levels ntsc_j_levels_svideo = { | 254 | static const struct video_levels ntsc_j_levels_svideo = { |
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = { | |||
257 | 257 | ||
258 | static const struct color_conversion pal_csc_composite = { | 258 | static const struct color_conversion pal_csc_composite = { |
259 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, | 259 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, |
260 | .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, | 260 | .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200, |
261 | .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, | 261 | .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | static const struct video_levels pal_levels_composite = { | 264 | static const struct video_levels pal_levels_composite = { |
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = { | |||
267 | 267 | ||
268 | static const struct color_conversion pal_csc_svideo = { | 268 | static const struct color_conversion pal_csc_svideo = { |
269 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, | 269 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, |
270 | .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, | 270 | .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200, |
271 | .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, | 271 | .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200, |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static const struct video_levels pal_levels_svideo = { | 274 | static const struct video_levels pal_levels_svideo = { |
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = { | |||
277 | 277 | ||
278 | static const struct color_conversion pal_m_csc_composite = { | 278 | static const struct color_conversion pal_m_csc_composite = { |
279 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 279 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
280 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 280 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
281 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 281 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static const struct video_levels pal_m_levels_composite = { | 284 | static const struct video_levels pal_m_levels_composite = { |
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = { | |||
286 | }; | 286 | }; |
287 | 287 | ||
288 | static const struct color_conversion pal_m_csc_svideo = { | 288 | static const struct color_conversion pal_m_csc_svideo = { |
289 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 289 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
290 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 290 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
291 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 291 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
292 | }; | 292 | }; |
293 | 293 | ||
294 | static const struct video_levels pal_m_levels_svideo = { | 294 | static const struct video_levels pal_m_levels_svideo = { |
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = { | |||
297 | 297 | ||
298 | static const struct color_conversion pal_n_csc_composite = { | 298 | static const struct color_conversion pal_n_csc_composite = { |
299 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, | 299 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, |
300 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, | 300 | .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200, |
301 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, | 301 | .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200, |
302 | }; | 302 | }; |
303 | 303 | ||
304 | static const struct video_levels pal_n_levels_composite = { | 304 | static const struct video_levels pal_n_levels_composite = { |
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = { | |||
306 | }; | 306 | }; |
307 | 307 | ||
308 | static const struct color_conversion pal_n_csc_svideo = { | 308 | static const struct color_conversion pal_n_csc_svideo = { |
309 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, | 309 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133, |
310 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, | 310 | .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200, |
311 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, | 311 | .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200, |
312 | }; | 312 | }; |
313 | 313 | ||
314 | static const struct video_levels pal_n_levels_svideo = { | 314 | static const struct video_levels pal_n_levels_svideo = { |
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = { | |||
319 | * Component connections | 319 | * Component connections |
320 | */ | 320 | */ |
321 | static const struct color_conversion sdtv_csc_yprpb = { | 321 | static const struct color_conversion sdtv_csc_yprpb = { |
322 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, | 322 | .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, |
323 | .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, | 323 | .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200, |
324 | .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, | 324 | .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, |
325 | }; | 325 | }; |
326 | 326 | ||
327 | static const struct color_conversion sdtv_csc_rgb = { | 327 | static const struct color_conversion sdtv_csc_rgb = { |
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = { | |||
331 | }; | 331 | }; |
332 | 332 | ||
333 | static const struct color_conversion hdtv_csc_yprpb = { | 333 | static const struct color_conversion hdtv_csc_yprpb = { |
334 | .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, | 334 | .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, |
335 | .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, | 335 | .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, |
336 | .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, | 336 | .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, |
337 | }; | 337 | }; |
338 | 338 | ||
339 | static const struct color_conversion hdtv_csc_rgb = { | 339 | static const struct color_conversion hdtv_csc_rgb = { |
@@ -414,7 +414,7 @@ struct tv_mode { | |||
414 | static const struct tv_mode tv_modes[] = { | 414 | static const struct tv_mode tv_modes[] = { |
415 | { | 415 | { |
416 | .name = "NTSC-M", | 416 | .name = "NTSC-M", |
417 | .clock = 107520, | 417 | .clock = 108000, |
418 | .refresh = 29970, | 418 | .refresh = 29970, |
419 | .oversample = TV_OVERSAMPLE_8X, | 419 | .oversample = TV_OVERSAMPLE_8X, |
420 | .component_only = 0, | 420 | .component_only = 0, |
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = { | |||
442 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 442 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
443 | 443 | ||
444 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 444 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
445 | .dda1_inc = 136, | 445 | .dda1_inc = 135, |
446 | .dda2_inc = 7624, .dda2_size = 20013, | 446 | .dda2_inc = 20800, .dda2_size = 27456, |
447 | .dda3_inc = 0, .dda3_size = 0, | 447 | .dda3_inc = 0, .dda3_size = 0, |
448 | .sc_reset = TV_SC_RESET_EVERY_4, | 448 | .sc_reset = TV_SC_RESET_EVERY_4, |
449 | .pal_burst = false, | 449 | .pal_burst = false, |
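The NTSC-M constants above can be checked by hand. Assuming the two-stage DDA synthesizes the subcarrier as clock * (dda1_inc + dda2_inc/dda2_size) / 4096 (the 4096 divisor is an inference, but it is consistent with every table in this file):

    old: 107.52 MHz * (136 + 7624/20013) / 4096 = 3.5800000 MHz exactly
         (7624/20013 = 8/21; the flat 3.58 the comment records)
    new: 108.00 MHz * (135 + 20800/27456) / 4096
       = 108.00 MHz * (135 + 25/33) / 4096
       = 3.5795454... MHz = 315/88 MHz, the exact NTSC-M burst frequency

By the same formula the new PAL-M pair further down (135 + 16704/27456) lands at roughly 3.575612 MHz, the proper PAL-M subcarrier, where the old table reused NTSC's 3.58 MHz; the untouched "clock 107.52" comments are stale after these changes.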
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = { | |||
457 | }, | 457 | }, |
458 | { | 458 | { |
459 | .name = "NTSC-443", | 459 | .name = "NTSC-443", |
460 | .clock = 107520, | 460 | .clock = 108000, |
461 | .refresh = 29970, | 461 | .refresh = 29970, |
462 | .oversample = TV_OVERSAMPLE_8X, | 462 | .oversample = TV_OVERSAMPLE_8X, |
463 | .component_only = 0, | 463 | .component_only = 0, |
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = { | |||
485 | 485 | ||
486 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 486 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
487 | .dda1_inc = 168, | 487 | .dda1_inc = 168, |
488 | .dda2_inc = 18557, .dda2_size = 20625, | 488 | .dda2_inc = 4093, .dda2_size = 27456, |
489 | .dda3_inc = 0, .dda3_size = 0, | 489 | .dda3_inc = 310, .dda3_size = 525, |
490 | .sc_reset = TV_SC_RESET_EVERY_8, | 490 | .sc_reset = TV_SC_RESET_NEVER, |
491 | .pal_burst = true, | 491 | .pal_burst = false, |
492 | 492 | ||
493 | .composite_levels = &ntsc_m_levels_composite, | 493 | .composite_levels = &ntsc_m_levels_composite, |
494 | .composite_color = &ntsc_m_csc_composite, | 494 | .composite_color = &ntsc_m_csc_composite, |
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = { | |||
499 | }, | 499 | }, |
500 | { | 500 | { |
501 | .name = "NTSC-J", | 501 | .name = "NTSC-J", |
502 | .clock = 107520, | 502 | .clock = 108000, |
503 | .refresh = 29970, | 503 | .refresh = 29970, |
504 | .oversample = TV_OVERSAMPLE_8X, | 504 | .oversample = TV_OVERSAMPLE_8X, |
505 | .component_only = 0, | 505 | .component_only = 0, |
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = { | |||
527 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 527 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
528 | 528 | ||
529 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 529 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
530 | .dda1_inc = 136, | 530 | .dda1_inc = 135, |
531 | .dda2_inc = 7624, .dda2_size = 20013, | 531 | .dda2_inc = 20800, .dda2_size = 27456, |
532 | .dda3_inc = 0, .dda3_size = 0, | 532 | .dda3_inc = 0, .dda3_size = 0, |
533 | .sc_reset = TV_SC_RESET_EVERY_4, | 533 | .sc_reset = TV_SC_RESET_EVERY_4, |
534 | .pal_burst = false, | 534 | .pal_burst = false, |
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = { | |||
542 | }, | 542 | }, |
543 | { | 543 | { |
544 | .name = "PAL-M", | 544 | .name = "PAL-M", |
545 | .clock = 107520, | 545 | .clock = 108000, |
546 | .refresh = 29970, | 546 | .refresh = 29970, |
547 | .oversample = TV_OVERSAMPLE_8X, | 547 | .oversample = TV_OVERSAMPLE_8X, |
548 | .component_only = 0, | 548 | .component_only = 0, |
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = { | |||
570 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, | 570 | .vburst_start_f4 = 10, .vburst_end_f4 = 240, |
571 | 571 | ||
572 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ | 572 | /* desired 3.5800000 actual 3.5800000 clock 107.52 */ |
573 | .dda1_inc = 136, | 573 | .dda1_inc = 135, |
574 | .dda2_inc = 7624, .dda2_size = 20013, | 574 | .dda2_inc = 16704, .dda2_size = 27456, |
575 | .dda3_inc = 0, .dda3_size = 0, | 575 | .dda3_inc = 0, .dda3_size = 0, |
576 | .sc_reset = TV_SC_RESET_EVERY_4, | 576 | .sc_reset = TV_SC_RESET_EVERY_8, |
577 | .pal_burst = false, | 577 | .pal_burst = true, |
578 | 578 | ||
579 | .composite_levels = &pal_m_levels_composite, | 579 | .composite_levels = &pal_m_levels_composite, |
580 | .composite_color = &pal_m_csc_composite, | 580 | .composite_color = &pal_m_csc_composite, |
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = { | |||
586 | { | 586 | { |
587 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | 587 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ |
588 | .name = "PAL-N", | 588 | .name = "PAL-N", |
589 | .clock = 107520, | 589 | .clock = 108000, |
590 | .refresh = 25000, | 590 | .refresh = 25000, |
591 | .oversample = TV_OVERSAMPLE_8X, | 591 | .oversample = TV_OVERSAMPLE_8X, |
592 | .component_only = 0, | 592 | .component_only = 0, |
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = { | |||
615 | 615 | ||
616 | 616 | ||
617 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 617 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
618 | .dda1_inc = 168, | 618 | .dda1_inc = 135, |
619 | .dda2_inc = 18557, .dda2_size = 20625, | 619 | .dda2_inc = 23578, .dda2_size = 27648, |
620 | .dda3_inc = 0, .dda3_size = 0, | 620 | .dda3_inc = 134, .dda3_size = 625, |
621 | .sc_reset = TV_SC_RESET_EVERY_8, | 621 | .sc_reset = TV_SC_RESET_EVERY_8, |
622 | .pal_burst = true, | 622 | .pal_burst = true, |
623 | 623 | ||
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = { | |||
631 | { | 631 | { |
632 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ | 632 | /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ |
633 | .name = "PAL", | 633 | .name = "PAL", |
634 | .clock = 107520, | 634 | .clock = 108000, |
635 | .refresh = 25000, | 635 | .refresh = 25000, |
636 | .oversample = TV_OVERSAMPLE_8X, | 636 | .oversample = TV_OVERSAMPLE_8X, |
637 | .component_only = 0, | 637 | .component_only = 0, |
638 | 638 | ||
639 | .hsync_end = 64, .hblank_end = 128, | 639 | .hsync_end = 64, .hblank_end = 142, |
640 | .hblank_start = 844, .htotal = 863, | 640 | .hblank_start = 844, .htotal = 863, |
641 | 641 | ||
642 | .progressive = false, .trilevel_sync = false, | 642 | .progressive = false, .trilevel_sync = false, |
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = { | |||
659 | 659 | ||
660 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ | 660 | /* desired 4.4336180 actual 4.4336180 clock 107.52 */ |
661 | .dda1_inc = 168, | 661 | .dda1_inc = 168, |
662 | .dda2_inc = 18557, .dda2_size = 20625, | 662 | .dda2_inc = 4122, .dda2_size = 27648, |
663 | .dda3_inc = 0, .dda3_size = 0, | 663 | .dda3_inc = 67, .dda3_size = 625, |
664 | .sc_reset = TV_SC_RESET_EVERY_8, | 664 | .sc_reset = TV_SC_RESET_EVERY_8, |
665 | .pal_burst = true, | 665 | .pal_burst = true, |
666 | 666 | ||
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = { | |||
689 | .veq_ena = false, | 689 | .veq_ena = false, |
690 | 690 | ||
691 | .vi_end_f1 = 44, .vi_end_f2 = 44, | 691 | .vi_end_f1 = 44, .vi_end_f2 = 44, |
692 | .nbr_end = 496, | 692 | .nbr_end = 479, |
693 | 693 | ||
694 | .burst_ena = false, | 694 | .burst_ena = false, |
695 | 695 | ||
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = { | |||
713 | .veq_ena = false, | 713 | .veq_ena = false, |
714 | 714 | ||
715 | .vi_end_f1 = 44, .vi_end_f2 = 44, | 715 | .vi_end_f1 = 44, .vi_end_f2 = 44, |
716 | .nbr_end = 496, | 716 | .nbr_end = 479, |
717 | 717 | ||
718 | .burst_ena = false, | 718 | .burst_ena = false, |
719 | 719 | ||
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = { | |||
876 | .component_only = 1, | 876 | .component_only = 1, |
877 | 877 | ||
878 | .hsync_end = 88, .hblank_end = 235, | 878 | .hsync_end = 88, .hblank_end = 235, |
879 | .hblank_start = 2155, .htotal = 2200, | 879 | .hblank_start = 2155, .htotal = 2201, |
880 | 880 | ||
881 | .progressive = false, .trilevel_sync = true, | 881 | .progressive = false, .trilevel_sync = true, |
882 | 882 | ||
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo | |||
1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1082 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); |
1083 | 1083 | ||
1084 | /* Ensure TV refresh is close to desired refresh */ | 1084 | /* Ensure TV refresh is close to desired refresh */ |
1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) | 1085 | if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10) |
1086 | return MODE_OK; | 1086 | return MODE_OK; |
1087 | return MODE_CLOCK_RANGE; | 1087 | return MODE_CLOCK_RANGE; |
1088 | } | 1088 | } |
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1135 | if (!tv_mode) | 1135 | if (!tv_mode) |
1136 | return; /* can't happen (mode_prepare prevents this) */ | 1136 | return; /* can't happen (mode_prepare prevents this) */ |
1137 | 1137 | ||
1138 | tv_ctl = 0; | 1138 | tv_ctl = I915_READ(TV_CTL); |
1139 | tv_ctl &= TV_CTL_SAVE; | ||
1139 | 1140 | ||
1140 | switch (tv_priv->type) { | 1141 | switch (tv_priv->type) { |
1141 | default: | 1142 | default: |
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1215 | /* dda1 implies valid video levels */ | 1216 | /* dda1 implies valid video levels */ |
1216 | if (tv_mode->dda1_inc) { | 1217 | if (tv_mode->dda1_inc) { |
1217 | scctl1 |= TV_SC_DDA1_EN; | 1218 | scctl1 |= TV_SC_DDA1_EN; |
1218 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | if (tv_mode->dda2_inc) | 1221 | if (tv_mode->dda2_inc) |
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1225 | scctl1 |= TV_SC_DDA3_EN; | 1225 | scctl1 |= TV_SC_DDA3_EN; |
1226 | 1226 | ||
1227 | scctl1 |= tv_mode->sc_reset; | 1227 | scctl1 |= tv_mode->sc_reset; |
1228 | scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
1228 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; | 1229 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; |
1229 | 1230 | ||
1230 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | | 1231 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
1266 | color_conversion->av); | 1267 | color_conversion->av); |
1267 | } | 1268 | } |
1268 | 1269 | ||
1269 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | 1270 | if (IS_I965G(dev)) |
1271 | I915_WRITE(TV_CLR_KNOBS, 0x00404000); | ||
1272 | else | ||
1273 | I915_WRITE(TV_CLR_KNOBS, 0x00606000); | ||
1274 | |||
1270 | if (video_levels) | 1275 | if (video_levels) |
1271 | I915_WRITE(TV_CLR_LEVEL, | 1276 | I915_WRITE(TV_CLR_LEVEL, |
1272 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | | 1277 | ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) | |||
1401 | tv_dac = I915_READ(TV_DAC); | 1406 | tv_dac = I915_READ(TV_DAC); |
1402 | I915_WRITE(TV_DAC, save_tv_dac); | 1407 | I915_WRITE(TV_DAC, save_tv_dac); |
1403 | I915_WRITE(TV_CTL, save_tv_ctl); | 1408 | I915_WRITE(TV_CTL, save_tv_ctl); |
1409 | intel_wait_for_vblank(dev); | ||
1404 | } | 1410 | } |
1405 | /* | 1411 | /* |
1406 | * A B C | 1412 | * A B C |
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector) | |||
1451 | mode = reported_modes[0]; | 1457 | mode = reported_modes[0]; |
1452 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); | 1458 | drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); |
1453 | 1459 | ||
1454 | if (encoder->crtc) { | 1460 | if (encoder->crtc && encoder->crtc->enabled) { |
1455 | type = intel_tv_detect_type(encoder->crtc, intel_output); | 1461 | type = intel_tv_detect_type(encoder->crtc, intel_output); |
1456 | } else { | 1462 | } else { |
1457 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); | 1463 | crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); |
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector) | |||
1462 | type = -1; | 1468 | type = -1; |
1463 | } | 1469 | } |
1464 | 1470 | ||
1471 | tv_priv->type = type; | ||
1472 | |||
1465 | if (type < 0) | 1473 | if (type < 0) |
1466 | return connector_status_disconnected; | 1474 | return connector_status_disconnected; |
1467 | 1475 | ||
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1495 | struct drm_display_mode *mode_ptr; | 1503 | struct drm_display_mode *mode_ptr; |
1496 | struct intel_output *intel_output = to_intel_output(connector); | 1504 | struct intel_output *intel_output = to_intel_output(connector); |
1497 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); | 1505 | const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); |
1498 | int j; | 1506 | int j, count = 0; |
1507 | u64 tmp; | ||
1499 | 1508 | ||
1500 | for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); | 1509 | for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); |
1501 | j++) { | 1510 | j++) { |
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1510 | && !tv_mode->component_only)) | 1519 | && !tv_mode->component_only)) |
1511 | continue; | 1520 | continue; |
1512 | 1521 | ||
1513 | mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), | 1522 | mode_ptr = drm_mode_create(connector->dev); |
1514 | DRM_MEM_DRIVER); | 1523 | if (!mode_ptr) |
1524 | continue; | ||
1515 | strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); | 1525 | strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); |
1516 | 1526 | ||
1517 | mode_ptr->hdisplay = hactive_s; | 1527 | mode_ptr->hdisplay = hactive_s; |
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1528 | mode_ptr->vsync_end = mode_ptr->vsync_start + 1; | 1538 | mode_ptr->vsync_end = mode_ptr->vsync_start + 1; |
1529 | mode_ptr->vtotal = vactive_s + 33; | 1539 | mode_ptr->vtotal = vactive_s + 33; |
1530 | 1540 | ||
1531 | mode_ptr->clock = (int) (tv_mode->refresh * | 1541 | tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; |
1532 | mode_ptr->vtotal * | 1542 | tmp *= mode_ptr->htotal; |
1533 | mode_ptr->htotal / 1000) / 1000; | 1543 | tmp = div_u64(tmp, 1000000); |
1544 | mode_ptr->clock = (int) tmp; | ||
1534 | 1545 | ||
1535 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; | 1546 | mode_ptr->type = DRM_MODE_TYPE_DRIVER; |
1536 | drm_mode_probed_add(connector, mode_ptr); | 1547 | drm_mode_probed_add(connector, mode_ptr); |
1548 | count++; | ||
1537 | } | 1549 | } |
1538 | 1550 | ||
1539 | return 0; | 1551 | return count; |
1540 | } | 1552 | } |
1541 | 1553 | ||
1542 | static void | 1554 | static void |
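A unit note on the clock computation above: tv_mode->refresh is stored in millihertz (29970 for 29.97 Hz), so the probed mode's pixel clock in kHz is refresh * vtotal * htotal / 10^6. The old expression divided early, throwing away precision, and the full product overflows 32 bits for the larger timings; taking the 1080i tv_mode's htotal and a ~1113-line vtotal as representative magnitudes:

    (u64)29970 * 2201 * 1113 = 73,417,898,610  >  2^32 = 4,294,967,296

Hence the u64 accumulator and div_u64(), the kernel's 64-by-32 divide helper; a bare 64-bit '/' would emit a call to __udivdi3, which the kernel does not provide on 32-bit architectures.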
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c index 31400c8ae051..d696f69ebce5 100644 --- a/drivers/ieee1394/csr.c +++ b/drivers/ieee1394/csr.c | |||
@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = { | |||
68 | .host_reset = host_reset, | 68 | .host_reset = host_reset, |
69 | }; | 69 | }; |
70 | 70 | ||
71 | const static struct hpsb_address_ops map_ops = { | 71 | static const struct hpsb_address_ops map_ops = { |
72 | .read = read_maps, | 72 | .read = read_maps, |
73 | }; | 73 | }; |
74 | 74 | ||
75 | const static struct hpsb_address_ops fcp_ops = { | 75 | static const struct hpsb_address_ops fcp_ops = { |
76 | .write = write_fcp, | 76 | .write = write_fcp, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | const static struct hpsb_address_ops reg_ops = { | 79 | static const struct hpsb_address_ops reg_ops = { |
80 | .read = read_regs, | 80 | .read = read_regs, |
81 | .write = write_regs, | 81 | .write = write_regs, |
82 | .lock = lock_regs, | 82 | .lock = lock_regs, |
83 | .lock64 = lock64_regs, | 83 | .lock64 = lock64_regs, |
84 | }; | 84 | }; |
85 | 85 | ||
86 | const static struct hpsb_address_ops config_rom_ops = { | 86 | static const struct hpsb_address_ops config_rom_ops = { |
87 | .read = read_config_rom, | 87 | .read = read_config_rom, |
88 | }; | 88 | }; |
89 | 89 | ||
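The csr.c hunks above (and the matching ones in eth1394.c, highlevel.c, raw1394.c and sbp2.c below) are pure declaration-order fixes: "const static" is legal C, but C99 6.11.5 makes a storage-class specifier anywhere other than first in the declaration specifiers an obsolescent feature, and gcc flags it with -Wold-style-declaration (enabled by -Wextra). In miniature:

    static const int x = 1;     /* preferred order */
    const static int y = 1;     /* same meaning, but warned as old-style */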
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index cb15bfa38d70..823a6297a1af 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c | |||
@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops= | |||
2171 | * Export information about protocols/devices supported by this driver. | 2171 | * Export information about protocols/devices supported by this driver. |
2172 | */ | 2172 | */ |
2173 | #ifdef MODULE | 2173 | #ifdef MODULE |
2174 | static struct ieee1394_device_id dv1394_id_table[] = { | 2174 | static const struct ieee1394_device_id dv1394_id_table[] = { |
2175 | { | 2175 | { |
2176 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | 2176 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, |
2177 | .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, | 2177 | .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, |
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 1a919df809f8..4ca103577c0a 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c | |||
@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host); | |||
181 | static void ether1394_host_reset(struct hpsb_host *host); | 181 | static void ether1394_host_reset(struct hpsb_host *host); |
182 | 182 | ||
183 | /* Function for incoming 1394 packets */ | 183 | /* Function for incoming 1394 packets */ |
184 | const static struct hpsb_address_ops addr_ops = { | 184 | static const struct hpsb_address_ops addr_ops = { |
185 | .write = ether1394_write, | 185 | .write = ether1394_write, |
186 | }; | 186 | }; |
187 | 187 | ||
@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud) | |||
438 | return eth1394_new_node(hi, ud); | 438 | return eth1394_new_node(hi, ud); |
439 | } | 439 | } |
440 | 440 | ||
441 | static struct ieee1394_device_id eth1394_id_table[] = { | 441 | static const struct ieee1394_device_id eth1394_id_table[] = { |
442 | { | 442 | { |
443 | .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | | 443 | .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | |
444 | IEEE1394_MATCH_VERSION), | 444 | IEEE1394_MATCH_VERSION), |
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 600e391c8fe7..4bc443546e04 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c | |||
@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, | |||
478 | return retval; | 478 | return retval; |
479 | } | 479 | } |
480 | 480 | ||
481 | const static struct hpsb_address_ops dummy_ops; | 481 | static const struct hpsb_address_ops dummy_ops; |
482 | 482 | ||
483 | /* dummy address spaces as lower and upper bounds of the host's a.s. list */ | 483 | /* dummy address spaces as lower and upper bounds of the host's a.s. list */ |
484 | static void init_hpsb_highlevel(struct hpsb_host *host) | 484 | static void init_hpsb_highlevel(struct hpsb_host *host) |
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 53aada5bbe1e..a6d55bebe61a 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c | |||
@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = { | |||
484 | static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) | 484 | static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) |
485 | { | 485 | { |
486 | struct hpsb_protocol_driver *driver; | 486 | struct hpsb_protocol_driver *driver; |
487 | struct ieee1394_device_id *id; | 487 | const struct ieee1394_device_id *id; |
488 | int length = 0; | 488 | int length = 0; |
489 | char *scratch = buf; | 489 | char *scratch = buf; |
490 | 490 | ||
@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv) | |||
658 | { | 658 | { |
659 | struct hpsb_protocol_driver *driver; | 659 | struct hpsb_protocol_driver *driver; |
660 | struct unit_directory *ud; | 660 | struct unit_directory *ud; |
661 | struct ieee1394_device_id *id; | 661 | const struct ieee1394_device_id *id; |
662 | 662 | ||
663 | /* We only match unit directories */ | 663 | /* We only match unit directories */ |
664 | if (dev->platform_data != &nodemgr_ud_platform_data) | 664 | if (dev->platform_data != &nodemgr_ud_platform_data) |
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index ee5acdbd114a..749b271d3107 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h | |||
@@ -125,7 +125,7 @@ struct hpsb_protocol_driver { | |||
125 | * probe function below can implement further protocol | 125 | * probe function below can implement further protocol |
126 | * dependent or vendor dependent checking. | 126 | * dependent or vendor dependent checking. |
127 | */ | 127 | */ |
128 | struct ieee1394_device_id *id_table; | 128 | const struct ieee1394_device_id *id_table; |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * The update function is called when the node has just | 131 | * The update function is called when the node has just |
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index bad66c65b0d6..da5f8829b503 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c | |||
@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, | |||
90 | static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, | 90 | static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, |
91 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, | 91 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, |
92 | u16 flags); | 92 | u16 flags); |
93 | const static struct hpsb_address_ops arm_ops = { | 93 | static const struct hpsb_address_ops arm_ops = { |
94 | .read = arm_read, | 94 | .read = arm_read, |
95 | .write = arm_write, | 95 | .write = arm_write, |
96 | .lock = arm_lock, | 96 | .lock = arm_lock, |
@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf) | |||
369 | { | 369 | { |
370 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; | 370 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; |
371 | struct raw1394_request __user *r; | 371 | struct raw1394_request __user *r; |
372 | |||
372 | r = compat_alloc_user_space(sizeof(struct raw1394_request)); | 373 | r = compat_alloc_user_space(sizeof(struct raw1394_request)); |
373 | 374 | ||
374 | #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) | 375 | #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) |
@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf) | |||
378 | C(tag) || | 379 | C(tag) || |
379 | C(sendb) || | 380 | C(sendb) || |
380 | C(recvb)) | 381 | C(recvb)) |
381 | return ERR_PTR(-EFAULT); | 382 | return (__force const char __user *)ERR_PTR(-EFAULT); |
383 | |||
382 | return (const char __user *)r; | 384 | return (const char __user *)r; |
383 | } | 385 | } |
384 | #undef C | 386 | #undef C |
@@ -389,6 +391,7 @@ static int | |||
389 | raw1394_compat_read(const char __user *buf, struct raw1394_request *r) | 391 | raw1394_compat_read(const char __user *buf, struct raw1394_request *r) |
390 | { | 392 | { |
391 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; | 393 | struct compat_raw1394_req __user *cr = (typeof(cr)) buf; |
394 | |||
392 | if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || | 395 | if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || |
393 | P(type) || | 396 | P(type) || |
394 | P(error) || | 397 | P(error) || |
@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r) | |||
400 | P(sendb) || | 403 | P(sendb) || |
401 | P(recvb)) | 404 | P(recvb)) |
402 | return -EFAULT; | 405 | return -EFAULT; |
406 | |||
403 | return sizeof(struct compat_raw1394_req); | 407 | return sizeof(struct compat_raw1394_req); |
404 | } | 408 | } |
405 | #undef P | 409 | #undef P |
@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, | |||
2249 | sizeof(struct compat_raw1394_req) != | 2253 | sizeof(struct compat_raw1394_req) != |
2250 | sizeof(struct raw1394_request)) { | 2254 | sizeof(struct raw1394_request)) { |
2251 | buffer = raw1394_compat_write(buffer); | 2255 | buffer = raw1394_compat_write(buffer); |
2252 | if (IS_ERR(buffer)) | 2256 | if (IS_ERR((__force void *)buffer)) |
2253 | return PTR_ERR(buffer); | 2257 | return PTR_ERR((__force void *)buffer); |
2254 | } else | 2258 | } else |
2255 | #endif | 2259 | #endif |
2256 | if (count != sizeof(struct raw1394_request)) { | 2260 | if (count != sizeof(struct raw1394_request)) { |
@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file) | |||
2978 | * Export information about protocols/devices supported by this driver. | 2982 | * Export information about protocols/devices supported by this driver. |
2979 | */ | 2983 | */ |
2980 | #ifdef MODULE | 2984 | #ifdef MODULE |
2981 | static struct ieee1394_device_id raw1394_id_table[] = { | 2985 | static const struct ieee1394_device_id raw1394_id_table[] = { |
2982 | { | 2986 | { |
2983 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | 2987 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, |
2984 | .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, | 2988 | .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f3fd8657ce4b..a51ab233342d 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = { | |||
265 | .host_reset = sbp2_host_reset, | 265 | .host_reset = sbp2_host_reset, |
266 | }; | 266 | }; |
267 | 267 | ||
268 | const static struct hpsb_address_ops sbp2_ops = { | 268 | static const struct hpsb_address_ops sbp2_ops = { |
269 | .write = sbp2_handle_status_write | 269 | .write = sbp2_handle_status_write |
270 | }; | 270 | }; |
271 | 271 | ||
@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *, | |||
275 | static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, | 275 | static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, |
276 | size_t, u16); | 276 | size_t, u16); |
277 | 277 | ||
278 | const static struct hpsb_address_ops sbp2_physdma_ops = { | 278 | static const struct hpsb_address_ops sbp2_physdma_ops = { |
279 | .read = sbp2_handle_physdma_read, | 279 | .read = sbp2_handle_physdma_read, |
280 | .write = sbp2_handle_physdma_write, | 280 | .write = sbp2_handle_physdma_write, |
281 | }; | 281 | }; |
@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = { | |||
285 | /* | 285 | /* |
286 | * Interface to driver core and IEEE 1394 core | 286 | * Interface to driver core and IEEE 1394 core |
287 | */ | 287 | */ |
288 | static struct ieee1394_device_id sbp2_id_table[] = { | 288 | static const struct ieee1394_device_id sbp2_id_table[] = { |
289 | { | 289 | { |
290 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | 290 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, |
291 | .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, | 291 | .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, |
@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, | |||
1413 | "(firmware_revision 0x%06x, vendor_id 0x%06x," | 1413 | "(firmware_revision 0x%06x, vendor_id 0x%06x," |
1414 | " model_id 0x%06x)", | 1414 | " model_id 0x%06x)", |
1415 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), | 1415 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), |
1416 | workarounds, firmware_revision, | 1416 | workarounds, firmware_revision, ud->vendor_id, |
1417 | ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, | ||
1418 | model); | 1417 | model); |
1419 | 1418 | ||
1420 | /* We would need one SCSI host template for each target to adjust | 1419 | /* We would need one SCSI host template for each target to adjust |
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 679a918a5cc7..d287ba79821d 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c | |||
@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops= | |||
1294 | * Export information about protocols/devices supported by this driver. | 1294 | * Export information about protocols/devices supported by this driver. |
1295 | */ | 1295 | */ |
1296 | #ifdef MODULE | 1296 | #ifdef MODULE |
1297 | static struct ieee1394_device_id video1394_id_table[] = { | 1297 | static const struct ieee1394_device_id video1394_id_table[] = { |
1298 | { | 1298 | { |
1299 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, | 1299 | .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, |
1300 | .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, | 1300 | .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, |
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c index b55d9ccaf33e..32526f103b59 100644 --- a/drivers/media/dvb/firewire/firedtv-avc.c +++ b/drivers/media/dvb/firewire/firedtv-avc.c | |||
@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype) | |||
115 | } | 115 | } |
116 | 116 | ||
117 | static const char *debug_fcp_opcode(unsigned int opcode, | 117 | static const char *debug_fcp_opcode(unsigned int opcode, |
118 | const u8 *data, size_t length) | 118 | const u8 *data, int length) |
119 | { | 119 | { |
120 | switch (opcode) { | 120 | switch (opcode) { |
121 | case AVC_OPCODE_VENDOR: break; | 121 | case AVC_OPCODE_VENDOR: break; |
@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode, | |||
135 | case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; | 135 | case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; |
136 | case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; | 136 | case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; |
137 | case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; | 137 | case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; |
138 | case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2"; | ||
138 | case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; | 139 | case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; |
139 | case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; | 140 | case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; |
140 | } | 141 | } |
141 | return "Vendor"; | 142 | return "Vendor"; |
142 | } | 143 | } |
143 | 144 | ||
144 | static void debug_fcp(const u8 *data, size_t length) | 145 | static void debug_fcp(const u8 *data, int length) |
145 | { | 146 | { |
146 | unsigned int subunit_type, subunit_id, op; | 147 | unsigned int subunit_type, subunit_id, op; |
147 | const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; | 148 | const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; |
@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv, | |||
266 | c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; | 267 | c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; |
267 | c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; | 268 | c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; |
268 | c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; | 269 | c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; |
269 | c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; | 270 | if (fdtv->type == FIREDTV_DVB_S2) |
271 | c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2; | ||
272 | else | ||
273 | c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; | ||
270 | 274 | ||
271 | c->operand[4] = (params->frequency >> 24) & 0xff; | 275 | c->operand[4] = (params->frequency >> 24) & 0xff; |
272 | c->operand[5] = (params->frequency >> 16) & 0xff; | 276 | c->operand[5] = (params->frequency >> 16) & 0xff; |
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 00d46e137b2a..92285d0089c2 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c | |||
@@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, | |||
81 | 81 | ||
82 | /* go */ | 82 | /* go */ |
83 | sb->s_flags |= MS_ACTIVE; | 83 | sb->s_flags |= MS_ACTIVE; |
84 | return simple_set_mnt(mnt, sb); | 84 | simple_set_mnt(mnt, sb); |
85 | |||
86 | return 0; | ||
85 | 87 | ||
86 | /* new mountpoint for an already mounted superblock */ | 88 | /* new mountpoint for an already mounted superblock */ |
87 | already_mounted: | 89 | already_mounted: |
88 | DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", | 90 | DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", |
89 | mtd->index, mtd->name); | 91 | mtd->index, mtd->name); |
90 | ret = simple_set_mnt(mnt, sb); | 92 | simple_set_mnt(mnt, sb); |
93 | ret = 0; | ||
91 | goto out_put; | 94 | goto out_put; |
92 | 95 | ||
93 | out_error: | 96 | out_error: |
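The mtdsuper.c hunks track a VFS interface change: judging from this diff, simple_set_mnt() can no longer fail (its return value is being dropped), so callers return 0 explicitly instead of forwarding it. The new convention, in brief:

    sb->s_flags |= MS_ACTIVE;
    simple_set_mnt(mnt, sb);        /* attach sb to the vfsmount; cannot fail */
    return 0;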