author     Ian Campbell <ian.campbell@citrix.com>    2013-02-20 06:48:06 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-02-20 08:45:07 -0500
commit     c81611c4e96f595a80d8be9367c385d2c116428b (patch)
tree       94c1b31034506d8b73b6d9b2c2e0e2331d0fd450 /drivers/xen
parent     76eaca031f0af2bb303e405986f637811956a422 (diff)
xen: event channel arrays are xen_ulong_t and not unsigned long
On ARM we want these to be the same size on 32- and 64-bit.

This is an ABI change on ARM. X86 does not change.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Keir (Xen.org) <keir@xen.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: xen-devel@lists.xen.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
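For context (an illustrative aside, not part of the commit): on a 32-bit ARM guest the Xen ABI's xen_ulong_t is 64 bits wide while the kernel's unsigned long is only 32, so indexing the shared event-channel arrays with BITS_PER_LONG would split ports at the wrong word boundaries. A minimal userspace sketch of that mismatch, under exactly those size assumptions:

/*
 * Illustrative sketch only -- not code from the patch. Assumes a 32-bit
 * ARM guest where xen_ulong_t is 64 bits and unsigned long is 32 bits.
 * Builds and runs as plain userspace C.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_ulong_t;	/* event channel word width per the Xen ARM ABI */

#define BITS_PER_LONG		(sizeof(unsigned long) * 8)	/* 32 under the assumption above */
#define BITS_PER_EVTCHN_WORD	(sizeof(xen_ulong_t) * 8)	/* 64 */

int main(void)
{
	unsigned int port = 100;

	/* Old arithmetic: word 3, bit 4 -- not where the hypervisor put the bit */
	printf("BITS_PER_LONG:        word %u, bit %u\n",
	       (unsigned int)(port / BITS_PER_LONG),
	       (unsigned int)(port % BITS_PER_LONG));

	/* Patched arithmetic: word 1, bit 36 -- matches the shared array layout */
	printf("BITS_PER_EVTCHN_WORD: word %u, bit %u\n",
	       (unsigned int)(port / BITS_PER_EVTCHN_WORD),
	       (unsigned int)(port % BITS_PER_EVTCHN_WORD));
	return 0;
}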
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/events.c  115
1 file changed, 67 insertions(+), 48 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0be4df39e953..342edc0495cc 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -120,7 +120,22 @@ static unsigned long *pirq_eoi_map;
 #endif
 static bool (*pirq_needs_eoi)(unsigned irq);
 
-static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+/*
+ * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
+ * careful to only use bitops which allow for this (e.g
+ * test_bit/find_first_bit and friends but not __ffs) and to pass
+ * BITS_PER_EVTCHN_WORD as the bitmask length.
+ */
+#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
+/*
+ * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
+ * array. Primarily to avoid long lines (hence the terse name).
+ */
+#define BM(x) (unsigned long *)(x)
+/* Find the first set bit in a evtchn mask */
+#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
 		      cpu_evtchn_mask);
 
 /* Xen will never allocate port zero for any purpose. */
@@ -294,9 +309,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
 	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }
 
-static inline unsigned long active_evtchns(unsigned int cpu,
-					   struct shared_info *sh,
-					   unsigned int idx)
+static inline xen_ulong_t active_evtchns(unsigned int cpu,
+					 struct shared_info *sh,
+					 unsigned int idx)
 {
 	return sh->evtchn_pending[idx] &
 		per_cpu(cpu_evtchn_mask, cpu)[idx] &
@@ -312,8 +327,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
-	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
-	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
+	clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
+	set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));
 
 	info_for_irq(irq)->cpu = cpu;
 }
@@ -339,19 +354,19 @@ static void init_evtchn_cpu_bindings(void)
 static inline void clear_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_clear_bit(port, &s->evtchn_pending[0]);
+	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 static inline void set_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, &s->evtchn_pending[0]);
+	sync_set_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 static inline int test_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	return sync_test_bit(port, &s->evtchn_pending[0]);
+	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
 }
 
 
@@ -375,7 +390,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq);
 static void mask_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
-	sync_set_bit(port, &s->evtchn_mask[0]);
+	sync_set_bit(port, BM(&s->evtchn_mask[0]));
 }
 
 static void unmask_evtchn(int port)
@@ -389,7 +404,7 @@ static void unmask_evtchn(int port)
 	if (unlikely((cpu != cpu_from_evtchn(port))))
 		do_hypercall = 1;
 	else
-		evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]);
+		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
 	if (unlikely(evtchn_pending && xen_hvm_domain()))
 		do_hypercall = 1;
@@ -403,7 +418,7 @@ static void unmask_evtchn(int port)
 	} else {
 		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-		sync_clear_bit(port, &s->evtchn_mask[0]);
+		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
 
 		/*
 		 * The following is basically the equivalent of
@@ -411,8 +426,8 @@ static void unmask_evtchn(int port)
 		 * the interrupt edge' if the channel is masked.
 		 */
 		if (evtchn_pending &&
-		    !sync_test_and_set_bit(port / BITS_PER_LONG,
-					   &vcpu_info->evtchn_pending_sel))
+		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
+					   BM(&vcpu_info->evtchn_pending_sel)))
 			vcpu_info->evtchn_upcall_pending = 1;
 	}
 
@@ -1189,7 +1204,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 {
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int cpu = smp_processor_id();
-	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
+	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
 	int i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(debug_lock);
@@ -1205,7 +1220,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 		pending = (get_irq_regs() && i == cpu)
 			? xen_irqs_disabled(get_irq_regs())
 			: v->evtchn_upcall_mask;
-		printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i,
+		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
 		       pending, v->evtchn_upcall_pending,
 		       (int)(sizeof(v->evtchn_pending_sel)*2),
 		       v->evtchn_pending_sel);
@@ -1214,49 +1229,52 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 
 	printk("\npending:\n ");
 	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)sizeof(sh->evtchn_pending[0])*2,
 		       sh->evtchn_pending[i],
 		       i % 8 == 0 ? "\n " : " ");
 	printk("\nglobal mask:\n ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*lx%s",
+		printk("%0*"PRI_xen_ulong"%s",
 		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       sh->evtchn_mask[i],
 		       i % 8 == 0 ? "\n " : " ");
 
 	printk("\nglobally unmasked:\n ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
 		       i % 8 == 0 ? "\n " : " ");
 
 	printk("\nlocal cpu%d mask:\n ", cpu);
-	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
-		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
+	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
+		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
 		       cpu_evtchn[i],
 		       i % 8 == 0 ? "\n " : " ");
 
 	printk("\nlocally unmasked:\n ");
 	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
-		unsigned long pending = sh->evtchn_pending[i]
+		xen_ulong_t pending = sh->evtchn_pending[i]
 				   & ~sh->evtchn_mask[i]
 				   & cpu_evtchn[i];
-		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
+		printk("%0*"PRI_xen_ulong"%s",
+		       (int)(sizeof(sh->evtchn_mask[0])*2),
 		       pending, i % 8 == 0 ? "\n " : " ");
 	}
 
 	printk("\npending list:\n");
 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-		if (sync_test_bit(i, sh->evtchn_pending)) {
-			int word_idx = i / BITS_PER_LONG;
+		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+			int word_idx = i / BITS_PER_EVTCHN_WORD;
 			printk(" %d: event %d -> irq %d%s%s%s\n",
 			       cpu_from_evtchn(i), i,
 			       evtchn_to_irq[i],
-			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
+			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
 			       ? "" : " l2-clear",
-			       !sync_test_bit(i, sh->evtchn_mask)
+			       !sync_test_bit(i, BM(sh->evtchn_mask))
 			       ? "" : " globally-masked",
-			       sync_test_bit(i, cpu_evtchn)
+			       sync_test_bit(i, BM(cpu_evtchn))
 			       ? "" : " locally-masked");
 		}
 	}
@@ -1273,7 +1291,7 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx);
 /*
  * Mask out the i least significant bits of w
  */
-#define MASK_LSBS(w, i) (w & ((~0UL) << i))
+#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
 
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
@@ -1295,18 +1313,19 @@ static void __xen_evtchn_do_upcall(void)
 	unsigned count;
 
 	do {
-		unsigned long pending_words;
+		xen_ulong_t pending_words;
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
 		if (__this_cpu_inc_return(xed_nesting_count) - 1)
 			goto out;
 
-#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-		/* Clear master flag /before/ clearing selector flag. */
-		wmb();
-#endif
-		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+		/*
+		 * Master flag must be cleared /before/ clearing
+		 * selector flag. xchg_xen_ulong must contain an
+		 * appropriate barrier.
+		 */
+		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
 
 		start_word_idx = __this_cpu_read(current_word_idx);
 		start_bit_idx = __this_cpu_read(current_bit_idx);
@@ -1314,8 +1333,8 @@ static void __xen_evtchn_do_upcall(void)
 		word_idx = start_word_idx;
 
 		for (i = 0; pending_words != 0; i++) {
-			unsigned long pending_bits;
-			unsigned long words;
+			xen_ulong_t pending_bits;
+			xen_ulong_t words;
 
 			words = MASK_LSBS(pending_words, word_idx);
 
@@ -1327,7 +1346,7 @@ static void __xen_evtchn_do_upcall(void)
 				bit_idx = 0;
 				continue;
 			}
-			word_idx = __ffs(words);
+			word_idx = EVTCHN_FIRST_BIT(words);
 
 			pending_bits = active_evtchns(cpu, s, word_idx);
 			bit_idx = 0; /* usually scan entire word from start */
@@ -1342,7 +1361,7 @@ static void __xen_evtchn_do_upcall(void)
 			}
 
 			do {
-				unsigned long bits;
+				xen_ulong_t bits;
 				int port, irq;
 				struct irq_desc *desc;
 
@@ -1352,10 +1371,10 @@ static void __xen_evtchn_do_upcall(void)
 				if (bits == 0)
 					break;
 
-				bit_idx = __ffs(bits);
+				bit_idx = EVTCHN_FIRST_BIT(bits);
 
 				/* Process port. */
-				port = (word_idx * BITS_PER_LONG) + bit_idx;
+				port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
 				irq = evtchn_to_irq[port];
 
 				if (irq != -1) {
@@ -1364,12 +1383,12 @@ static void __xen_evtchn_do_upcall(void)
 					generic_handle_irq_desc(irq, desc);
 				}
 
-				bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+				bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
 
 				/* Next caller starts at last processed + 1 */
 				__this_cpu_write(current_word_idx,
 						 bit_idx ? word_idx :
-						 (word_idx+1) % BITS_PER_LONG);
+						 (word_idx+1) % BITS_PER_EVTCHN_WORD);
 				__this_cpu_write(current_bit_idx, bit_idx);
 			} while (bit_idx != 0);
 
@@ -1377,7 +1396,7 @@ static void __xen_evtchn_do_upcall(void)
 			if ((word_idx != start_word_idx) || (i != 0))
 				pending_words &= ~(1UL << word_idx);
 
-			word_idx = (word_idx + 1) % BITS_PER_LONG;
+			word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
 		}
 
 	BUG_ON(!irqs_disabled());
@@ -1487,8 +1506,8 @@ int resend_irq_on_evtchn(unsigned int irq)
 	if (!VALID_EVTCHN(evtchn))
 		return 1;
 
-	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
-	sync_set_bit(evtchn, s->evtchn_pending);
+	masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+	sync_set_bit(evtchn, BM(s->evtchn_pending));
 	if (!masked)
 		unmask_evtchn(evtchn);
 
@@ -1536,8 +1555,8 @@ static int retrigger_dynirq(struct irq_data *data)
 	if (VALID_EVTCHN(evtchn)) {
 		int masked;
 
-		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
-		sync_set_bit(evtchn, sh->evtchn_pending);
+		masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask));
+		sync_set_bit(evtchn, BM(sh->evtchn_pending));
 		if (!masked)
 			unmask_evtchn(evtchn);
 		ret = 1;
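
As a reading aid (not from this commit): the comment added at the top of the diff spells out the rule that only bitops taking an explicit length, such as test_bit/find_first_bit, may be used on these xen_ulong_t arrays, always with BITS_PER_EVTCHN_WORD and a BM() cast. A hedged sketch of that pattern is below; the helper name scan_evtchn_word() is hypothetical and not part of the tree.

/*
 * Sketch only -- mirrors the rule in the comment added by this patch;
 * scan_evtchn_word() itself is a hypothetical helper.
 */
#include <linux/bitops.h>
#include <xen/interface/xen.h>		/* xen_ulong_t */

#define BITS_PER_EVTCHN_WORD	(sizeof(xen_ulong_t)*8)
#define BM(x)			(unsigned long *)(x)

/* Visit every set bit in one event-channel word. */
static void scan_evtchn_word(xen_ulong_t w, void (*cb)(unsigned int bit))
{
	unsigned int bit;

	/*
	 * __ffs() only operates on an unsigned long, which is narrower than
	 * xen_ulong_t on 32-bit ARM. for_each_set_bit() takes an explicit
	 * length, so pass BITS_PER_EVTCHN_WORD and a BM() cast instead.
	 */
	for_each_set_bit(bit, BM(&w), BITS_PER_EVTCHN_WORD)
		cb(bit);
}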