 arch/mips/kernel/mips_ksyms.c           |   2
 arch/mips/kernel/time.c                 |   2
 arch/mips/lib/Makefile                  |   2
 arch/mips/lib/csum_partial.S            | 442
 arch/mips/lib/csum_partial_copy.c       |  52
 arch/mips/mips-boards/generic/time.c    |   9
 arch/mips/mips-boards/malta/malta_mtd.c |  63
 arch/mips/mips-boards/sead/sead_int.c   |   4
 arch/mips/mm/pg-r4k.c                   |   9
 arch/mips/pci/ops-pnx8550.c             |   2
 arch/mips/philips/pnx8550/common/time.c |  44
 include/asm-mips/checksum.h             |  31
 include/asm-mips/irq.h                  |   6
 13 files changed, 581 insertions(+), 87 deletions(-)
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index f44a01357ada..2ef857c3ee53 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -46,5 +46,7 @@ EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
 EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 11aab6d6bfe5..8aa544f73a5e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -94,10 +94,8 @@ static void c0_timer_ack(void)
 {
 	unsigned int count;
 
-#ifndef CONFIG_SOC_PNX8550	/* pnx8550 resets to zero */
 	/* Ack this timer interrupt and set the next one.  */
 	expirelo += cycles_per_jiffy;
-#endif
 	write_c0_compare(expirelo);
 
 	/* Check to see if we have missed any timer interrupts.  */
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 888b61ea12fe..989c900b8b14 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+lib-y	+= csum_partial.o memcpy.o promlib.o \
 	   strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y	+= iomap.o
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 9db357294be1..c0a77fe038be 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,7 +8,9 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/errno.h>
 #include <asm/asm.h>
+#include <asm/asm-offsets.h>
 #include <asm/regdef.h>
 
 #ifdef CONFIG_64BIT
@@ -271,3 +273,443 @@ small_csumcpy:
 	jr	ra
 	.set	noreorder
 	END(csum_partial)
+
+
+/*
+ * checksum and copy routines based on memcpy.S
+ *
+ *	csum_partial_copy_nocheck(src, dst, len, sum)
+ *	__csum_partial_copy_user(src, dst, len, sum, errp)
+ *
+ * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
+ * functions in this file use the standard calling convention.
+ */
+
+#define src a0
+#define dst a1
+#define len a2
+#define psum a3
+#define sum v0
+#define odd t8
+#define errptr t9
+
+/*
+ * The exception handler for loads requires that:
+ *  1- AT contains the address of the byte just past the end of the source
+ *     of the copy,
+ *  2- src_entry <= src < AT, and
+ *  3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by __csum_partial_copy_from_user and maintained by
+ *	not writing AT in __csum_partial_copy
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores store -EFAULT to errptr and return.
+ * These handlers do not need to overwrite any data.
+ */
+
+#define EXC(inst_reg,addr,handler)		\
+9:	inst_reg, addr;				\
+	.section __ex_table,"a";		\
+	PTR	9b, handler;			\
+	.previous
+
+#ifdef USE_DOUBLE
+
+#define LOAD	ld
+#define LOADL	ldl
+#define LOADR	ldr
+#define STOREL	sdl
+#define STORER	sdr
+#define STORE	sd
+#define ADD	daddu
+#define SUB	dsubu
+#define SRL	dsrl
+#define SLL	dsll
+#define SLLV	dsllv
+#define SRLV	dsrlv
+#define NBYTES	8
+#define LOG_NBYTES 3
+
+#else
+
+#define LOAD	lw
+#define LOADL	lwl
+#define LOADR	lwr
+#define STOREL	swl
+#define STORER	swr
+#define STORE	sw
+#define ADD	addu
+#define SUB	subu
+#define SRL	srl
+#define SLL	sll
+#define SLLV	sllv
+#define SRLV	srlv
+#define NBYTES	4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST	LOADR
+#define LDREST	LOADL
+#define STFIRST	STORER
+#define STREST	STOREL
+#define SHIFT_DISCARD SLLV
+#define SHIFT_DISCARD_REVERT SRLV
+#else
+#define LDFIRST	LOADL
+#define LDREST	LOADR
+#define STFIRST	STOREL
+#define STREST	STORER
+#define SHIFT_DISCARD SRLV
+#define SHIFT_DISCARD_REVERT SLLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit)  (FIRST(unit)+NBYTES-1)
+
+#define ADDRMASK (NBYTES-1)
+
+	.set	noat
+
+LEAF(__csum_partial_copy_user)
+	PTR_ADDU	AT, src, len	/* See (1) above. */
+#ifdef CONFIG_64BIT
+	move	errptr, a4
+#else
+	lw	errptr, 16(sp)
+#endif
+FEXPORT(csum_partial_copy_nocheck)
+	move	sum, zero
+	move	odd, zero
+	/*
+	 * Note: dst & src may be unaligned, len may be 0
+	 * Temps
+	 */
+	/*
+	 * The "issue break"s below are very approximate.
+	 * Issue delays for dcache fills will perturb the schedule, as will
+	 * load queue full replay traps, etc.
+	 *
+	 * If len < NBYTES use byte operations.
+	 */
+	sltu	t2, len, NBYTES
+	and	t1, dst, ADDRMASK
+	bnez	t2, copy_bytes_checklen
+	 and	t0, src, ADDRMASK
+	andi	odd, dst, 0x1			/* odd buffer? */
+	bnez	t1, dst_unaligned
+	 nop
+	bnez	t0, src_unaligned_dst_aligned
+	/*
+	 * use delay slot for fall-through
+	 * src and dst are aligned; need to compute rem
+	 */
+both_aligned:
+	 SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
+	beqz	t0, cleanup_both_aligned	# len < 8*NBYTES
+	 nop
+	SUB	len, 8*NBYTES			# subtract here for bgez loop
+	.align	4
+1:
+EXC(	LOAD	t0, UNIT(0)(src), l_exc)
+EXC(	LOAD	t1, UNIT(1)(src), l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src), l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src), l_exc_copy)
+EXC(	LOAD	t4, UNIT(4)(src), l_exc_copy)
+EXC(	LOAD	t5, UNIT(5)(src), l_exc_copy)
+EXC(	LOAD	t6, UNIT(6)(src), l_exc_copy)
+EXC(	LOAD	t7, UNIT(7)(src), l_exc_copy)
+	SUB	len, len, 8*NBYTES
+	ADD	src, src, 8*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst), s_exc)
+	ADDC(sum, t0)
+EXC(	STORE	t1, UNIT(1)(dst), s_exc)
+	ADDC(sum, t1)
+EXC(	STORE	t2, UNIT(2)(dst), s_exc)
+	ADDC(sum, t2)
+EXC(	STORE	t3, UNIT(3)(dst), s_exc)
+	ADDC(sum, t3)
+EXC(	STORE	t4, UNIT(4)(dst), s_exc)
+	ADDC(sum, t4)
+EXC(	STORE	t5, UNIT(5)(dst), s_exc)
+	ADDC(sum, t5)
+EXC(	STORE	t6, UNIT(6)(dst), s_exc)
+	ADDC(sum, t6)
+EXC(	STORE	t7, UNIT(7)(dst), s_exc)
+	ADDC(sum, t7)
+	bgez	len, 1b
+	 ADD	dst, dst, 8*NBYTES
+	ADD	len, 8*NBYTES			# revert len (see above)
+
+	/*
+	 * len == the number of bytes left to copy < 8*NBYTES
+	 */
+cleanup_both_aligned:
+#define rem t7
+	beqz	len, done
+	 sltu	t0, len, 4*NBYTES
+	bnez	t0, less_than_4units
+	 and	rem, len, (NBYTES-1)		# rem = len % NBYTES
+	/*
+	 * len >= 4*NBYTES
+	 */
+EXC(	LOAD	t0, UNIT(0)(src), l_exc)
+EXC(	LOAD	t1, UNIT(1)(src), l_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src), l_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src), l_exc_copy)
+	SUB	len, len, 4*NBYTES
+	ADD	src, src, 4*NBYTES
+EXC(	STORE	t0, UNIT(0)(dst), s_exc)
+	ADDC(sum, t0)
+EXC(	STORE	t1, UNIT(1)(dst), s_exc)
+	ADDC(sum, t1)
+EXC(	STORE	t2, UNIT(2)(dst), s_exc)
+	ADDC(sum, t2)
+EXC(	STORE	t3, UNIT(3)(dst), s_exc)
+	ADDC(sum, t3)
+	beqz	len, done
+	 ADD	dst, dst, 4*NBYTES
+less_than_4units:
+	/*
+	 * rem = len % NBYTES
+	 */
+	beq	rem, len, copy_bytes
+	 nop
+1:
+EXC(	LOAD	t0, 0(src), l_exc)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+EXC(	STORE	t0, 0(dst), s_exc)
+	ADDC(sum, t0)
+	bne	rem, len, 1b
+	 ADD	dst, dst, NBYTES
+
+	/*
+	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+	 * A loop would do only a byte at a time with possible branch
+	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
+	 * because can't assume read-access to dst.  Instead, use
+	 * STREST dst, which doesn't require read access to dst.
+	 *
+	 * This code should perform better than a simple loop on modern,
+	 * wide-issue mips processors because the code has fewer branches and
+	 * more instruction-level parallelism.
+	 */
+#define bits t2
+	beqz	len, done
+	 ADD	t1, dst, len	# t1 is just past last byte of dst
+	li	bits, 8*NBYTES
+	SLL	rem, len, 3	# rem = number of bits to keep
+EXC(	LOAD	t0, 0(src), l_exc)
+	SUB	bits, bits, rem	# bits = number of bits to discard
+	SHIFT_DISCARD t0, t0, bits
+EXC(	STREST	t0, -1(t1), s_exc)
+	SHIFT_DISCARD_REVERT t0, t0, bits
+	.set	reorder
+	ADDC(sum, t0)
+	b	done
+	.set	noreorder
+dst_unaligned:
+	/*
+	 * dst is unaligned
+	 * t0 = src & ADDRMASK
+	 * t1 = dst & ADDRMASK; T1 > 0
+	 * len >= NBYTES
+	 *
+	 * Copy enough bytes to align dst
+	 * Set match = (src and dst have same alignment)
+	 */
+#define match rem
+EXC(	LDFIRST	t3, FIRST(0)(src), l_exc)
+	ADD	t2, zero, NBYTES
+EXC(	LDREST	t3, REST(0)(src), l_exc_copy)
+	SUB	t2, t2, t1	# t2 = number of bytes copied
+	xor	match, t0, t1
+EXC(	STFIRST t3, FIRST(0)(dst), s_exc)
+	SLL	t4, t1, 3		# t4 = number of bits to discard
+	SHIFT_DISCARD t3, t3, t4
+	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
+	ADDC(sum, t3)
+	beq	len, t2, done
+	 SUB	len, len, t2
+	ADD	dst, dst, t2
+	beqz	match, both_aligned
+	 ADD	src, src, t2
+
+src_unaligned_dst_aligned:
+	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
+	beqz	t0, cleanup_src_unaligned
+	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
+1:
+	/*
+	 * Avoid consecutive LD*'s to the same register since some mips
+	 * implementations can't issue them in the same cycle.
+	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+	 * are to the same unit (unless src is aligned, but it's not).
+	 */
+EXC(	LDFIRST	t0, FIRST(0)(src), l_exc)
+EXC(	LDFIRST	t1, FIRST(1)(src), l_exc_copy)
+	SUB	len, len, 4*NBYTES
+EXC(	LDREST	t0, REST(0)(src), l_exc_copy)
+EXC(	LDREST	t1, REST(1)(src), l_exc_copy)
+EXC(	LDFIRST	t2, FIRST(2)(src), l_exc_copy)
+EXC(	LDFIRST	t3, FIRST(3)(src), l_exc_copy)
+EXC(	LDREST	t2, REST(2)(src), l_exc_copy)
+EXC(	LDREST	t3, REST(3)(src), l_exc_copy)
+	ADD	src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+	nop				# improves slotting
+#endif
+EXC(	STORE	t0, UNIT(0)(dst), s_exc)
+	ADDC(sum, t0)
+EXC(	STORE	t1, UNIT(1)(dst), s_exc)
+	ADDC(sum, t1)
+EXC(	STORE	t2, UNIT(2)(dst), s_exc)
+	ADDC(sum, t2)
+EXC(	STORE	t3, UNIT(3)(dst), s_exc)
+	ADDC(sum, t3)
+	bne	len, rem, 1b
+	 ADD	dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+	beqz	len, done
+	 and	rem, len, NBYTES-1	# rem = len % NBYTES
+	beq	rem, len, copy_bytes
+	 nop
+1:
+EXC(	LDFIRST	t0, FIRST(0)(src), l_exc)
+EXC(	LDREST	t0, REST(0)(src), l_exc_copy)
+	ADD	src, src, NBYTES
+	SUB	len, len, NBYTES
+EXC(	STORE	t0, 0(dst), s_exc)
+	ADDC(sum, t0)
+	bne	len, rem, 1b
+	 ADD	dst, dst, NBYTES
+
+copy_bytes_checklen:
+	beqz	len, done
+	 nop
+copy_bytes:
+	/* 0 < len < NBYTES  */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define SHIFT_START 0
+#define SHIFT_INC 8
+#else
+#define SHIFT_START 8*(NBYTES-1)
+#define SHIFT_INC -8
+#endif
+	move	t2, zero	# partial word
+	li	t3, SHIFT_START	# shift
+/* use l_exc_copy here to return correct sum on fault */
+#define COPY_BYTE(N)			\
+EXC(	lbu	t0, N(src), l_exc_copy);	\
+	SUB	len, len, 1;		\
+EXC(	sb	t0, N(dst), s_exc);	\
+	SLLV	t0, t0, t3;		\
+	addu	t3, SHIFT_INC;		\
+	beqz	len, copy_bytes_done;	\
+	 or	t2, t0
+
+	COPY_BYTE(0)
+	COPY_BYTE(1)
+#ifdef USE_DOUBLE
+	COPY_BYTE(2)
+	COPY_BYTE(3)
+	COPY_BYTE(4)
+	COPY_BYTE(5)
+#endif
+EXC(	lbu	t0, NBYTES-2(src), l_exc_copy)
+	SUB	len, len, 1
+EXC(	sb	t0, NBYTES-2(dst), s_exc)
+	SLLV	t0, t0, t3
+	or	t2, t0
+copy_bytes_done:
+	ADDC(sum, t2)
+done:
+	/* fold checksum */
+#ifdef USE_DOUBLE
+	dsll32	v1, sum, 0
+	daddu	sum, v1
+	sltu	v1, sum, v1
+	dsra32	sum, sum, 0
+	addu	sum, v1
+#endif
+	sll	v1, sum, 16
+	addu	sum, v1
+	sltu	v1, sum, v1
+	srl	sum, sum, 16
+	addu	sum, v1
+
+	/* odd buffer alignment? */
+	beqz	odd, 1f
+	 nop
+	sll	v1, sum, 8
+	srl	sum, sum, 8
+	or	sum, v1
+	andi	sum, 0xffff
+1:
+	.set	reorder
+	ADDC(sum, psum)
+	jr	ra
+	.set	noreorder
+
+l_exc_copy:
+	/*
+	 * Copy bytes from src until faulting load address (or until a
+	 * lb faults)
+	 *
+	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+	 * may be more than a byte beyond the last address.
+	 * Hence, the lb below may get an exception.
+	 *
+	 * Assumes src < THREAD_BUADDR($28)
+	 */
+	LOAD	t0, TI_TASK($28)
+	li	t2, SHIFT_START
+	LOAD	t0, THREAD_BUADDR(t0)
+1:
+EXC(	lbu	t1, 0(src), l_exc)
+	ADD	src, src, 1
+	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
+	SLLV	t1, t1, t2
+	addu	t2, SHIFT_INC
+	ADDC(sum, t1)
+	bne	src, t0, 1b
+	 ADD	dst, dst, 1
+l_exc:
+	LOAD	t0, TI_TASK($28)
+	 nop
+	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	 nop
+	SUB	len, AT, t0		# len number of uncopied bytes
+	/*
+	 * Here's where we rely on src and dst being incremented in tandem,
+	 *   See (3) above.
+	 * dst += (fault addr - src) to put dst at first byte to clear
+	 */
+	ADD	dst, t0			# compute start address in a1
+	SUB	dst, src
+	/*
+	 * Clear len bytes starting at dst.  Can't call __bzero because it
+	 * might modify len.  An inefficient loop for these rare times...
+	 */
+	beqz	len, done
+	 SUB	src, len, 1
+1:	sb	zero, 0(dst)
+	ADD	dst, dst, 1
+	bnez	src, 1b
+	 SUB	src, src, 1
+	li	v1, -EFAULT
+	b	done
+	 sw	v1, (errptr)
+
+s_exc:
+	li	v0, -1			/* invalid checksum */
+	li	v1, -EFAULT
+	jr	ra
+	 sw	v1, (errptr)
+END(__csum_partial_copy_user)
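
[Editor's aside, not part of the patch: for readers following the assembly above, here is a rough C sketch of what the sequence at the "done:" label computes. The function name and the exact carry handling are illustrative only; the real code additionally adds the caller's partial sum (psum) with end-around carry before returning.]

	#include <limits.h>

	/*
	 * Sketch only: fold a running ones'-complement accumulator down to
	 * 16 bits, then byte-swap it if the destination buffer started on
	 * an odd address, mirroring the "done:" sequence above.
	 */
	static unsigned int fold_example(unsigned long acc, int odd)
	{
	#if ULONG_MAX > 0xffffffffUL
		acc = (acc & 0xffffffffUL) + (acc >> 32);  /* fold upper word   */
		acc = (acc & 0xffffffffUL) + (acc >> 32);  /* add carry back in */
	#endif
		acc = (acc & 0xffff) + (acc >> 16);        /* fold to 16 bits   */
		acc = (acc & 0xffff) + (acc >> 16);        /* add carry back in */
		if (odd)                                   /* odd dst alignment */
			acc = ((acc & 0xff) << 8) | ((acc >> 8) & 0xff);
		return acc & 0xffff;
	}
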
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
deleted file mode 100644
index 06771040a267..000000000000
--- a/arch/mips/lib/csum_partial_copy.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994, 1995 Waldorf Electronics GmbH
- * Copyright (C) 1998, 1999 Ralf Baechle
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-#include <net/checksum.h>
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-__wsum csum_partial_copy_nocheck(const void *src,
-	void *dst, int len, __wsum sum)
-{
-	/*
-	 * It's 2:30 am and I don't feel like doing it real ...
-	 * This is lots slower than the real thing (tm)
-	 */
-	sum = csum_partial(src, len, sum);
-	memcpy(dst, src, len);
-
-	return sum;
-}
-
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/*
- * Copy from userspace and compute checksum.  If we catch an exception
- * then zero the rest of the buffer.
- */
-__wsum csum_partial_copy_from_user (const void __user *src,
-	void *dst, int len, __wsum sum, int *err_ptr)
-{
-	int missing;
-
-	might_sleep();
-	missing = copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
-		*err_ptr = -EFAULT;
-	}
-
-	return csum_partial(dst, len, sum);
-}
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index e4604c73f02e..a3c3a1d462b2 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -47,6 +47,9 @@
 #ifdef CONFIG_MIPS_MALTA
 #include <asm/mips-boards/maltaint.h>
 #endif
+#ifdef CONFIG_MIPS_SEAD
+#include <asm/mips-boards/seadint.h>
+#endif
 
 unsigned long cpu_khz;
 
@@ -263,11 +266,13 @@ void __init mips_time_init(void)
 
 void __init plat_timer_setup(struct irqaction *irq)
 {
+#ifdef MSC01E_INT_BASE
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
 		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
-	}
-	else {
+	} else
+#endif
+	{
 		if (cpu_has_vint)
 			set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
 		mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
diff --git a/arch/mips/mips-boards/malta/malta_mtd.c b/arch/mips/mips-boards/malta/malta_mtd.c
new file mode 100644
index 000000000000..8ad9bdf25dce
--- /dev/null
+++ b/arch/mips/mips-boards/malta/malta_mtd.c
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 MIPS Technologies, Inc.
+ *     written by Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <mtd/mtd-abi.h>
+
+static struct mtd_partition malta_mtd_partitions[] = {
+	{
+		.name =		"YAMON",
+		.offset =	0x0,
+		.size =		0x100000,
+		.mask_flags =	MTD_WRITEABLE
+	}, {
+		.name =		"User FS",
+		.offset =	0x100000,
+		.size =		0x2e0000
+	}, {
+		.name =		"Board Config",
+		.offset =	0x3e0000,
+		.size =		0x020000,
+		.mask_flags =	MTD_WRITEABLE
+	}
+};
+
+static struct physmap_flash_data malta_flash_data = {
+	.width		= 4,
+	.nr_parts	= ARRAY_SIZE(malta_mtd_partitions),
+	.parts		= malta_mtd_partitions
+};
+
+static struct resource malta_flash_resource = {
+	.start		= 0x1e000000,
+	.end		= 0x1e3fffff,
+	.flags		= IORESOURCE_MEM
+};
+
+static struct platform_device malta_flash = {
+	.name		= "physmap-flash",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &malta_flash_data,
+	},
+	.num_resources	= 1,
+	.resource	= &malta_flash_resource,
+};
+
+static int __init malta_mtd_init(void)
+{
+	platform_device_register(&malta_flash);
+
+	return 0;
+}
+
+module_init(malta_mtd_init)
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index f445fcddfdfd..874ccb0066b8 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -21,7 +21,7 @@
  * Sead board.
  */
 #include <linux/init.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -108,7 +108,7 @@ asmlinkage void plat_irq_dispatch(void)
 	if (irq >= 0)
 		do_IRQ(MIPSCPU_INT_BASE + irq);
 	else
-		spurious_interrupt(regs);
+		spurious_interrupt();
 }
 
 void __init arch_init_irq(void)
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index d41fc5885e87..dc795be62807 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -243,11 +243,10 @@ static void __init __build_store_reg(int reg)
 
 static inline void build_store_reg(int reg)
 {
-	if (cpu_has_prefetch)
-		if (reg)
-			build_dst_pref(pref_offset_copy);
-		else
-			build_dst_pref(pref_offset_clear);
+	int pref_off = cpu_has_prefetch ?
+		(reg ? pref_offset_copy : pref_offset_clear) : 0;
+	if (pref_off)
+		build_dst_pref(pref_off);
 	else if (cpu_has_cache_cdex_s)
 		build_cdex_s();
 	else if (cpu_has_cache_cdex_p)
diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c
index 454b65cc3354..f556b7a8dccd 100644
--- a/arch/mips/pci/ops-pnx8550.c
+++ b/arch/mips/pci/ops-pnx8550.c
@@ -202,7 +202,7 @@ write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
 		break;
 	}
 
-	err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
+	err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
 
 	return err;
 }
diff --git a/arch/mips/philips/pnx8550/common/time.c b/arch/mips/philips/pnx8550/common/time.c
index 65c440e8480b..f80acae07cee 100644
--- a/arch/mips/philips/pnx8550/common/time.c
+++ b/arch/mips/philips/pnx8550/common/time.c
@@ -33,7 +33,17 @@
 #include <int.h>
 #include <cm.h>
 
-extern unsigned int mips_hpt_frequency;
+static unsigned long cpj;
+
+static cycle_t hpt_read(void)
+{
+	return read_c0_count2();
+}
+
+static void timer_ack(void)
+{
+	write_c0_compare(cpj);
+}
 
 /*
  * pnx8550_time_init() - it does the following things:
@@ -68,27 +78,47 @@ void pnx8550_time_init(void)
 	 * HZ timer interrupts per second.
 	 */
 	mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
+	cpj = (mips_hpt_frequency + HZ / 2) / HZ;
+	timer_ack();
+
+	/* Setup Timer 2 */
+	write_c0_count2(0);
+	write_c0_compare2(0xffffffff);
+
+	clocksource_mips.read = hpt_read;
+	mips_timer_ack = timer_ack;
+}
+
+static irqreturn_t monotonic_interrupt(int irq, void *dev_id)
+{
+	/* Timer 2 clear interrupt */
+	write_c0_compare2(-1);
+	return IRQ_HANDLED;
 }
 
+static struct irqaction monotonic_irqaction = {
+	.handler = monotonic_interrupt,
+	.flags = IRQF_DISABLED,
+	.name = "Monotonic timer",
+};
+
 void __init plat_timer_setup(struct irqaction *irq)
 {
 	int configPR;
 
 	setup_irq(PNX8550_INT_TIMER1, irq);
+	setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
 
-	/* Start timer1 */
+	/* Timer 1 start */
 	configPR = read_c0_config7();
 	configPR &= ~0x00000008;
 	write_c0_config7(configPR);
 
-	/* Timer 2 stop */
+	/* Timer 2 start */
 	configPR = read_c0_config7();
-	configPR |= 0x00000010;
+	configPR &= ~0x00000010;
 	write_c0_config7(configPR);
 
-	write_c0_count2(0);
-	write_c0_compare2(0xffffffff);
-
 	/* Timer 3 stop */
 	configPR = read_c0_config7();
 	configPR |= 0x00000020;
diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h
index 9b768c3b96b3..24cdcc6eaab8 100644
--- a/include/asm-mips/checksum.h
+++ b/include/asm-mips/checksum.h
@@ -29,31 +29,38 @@
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
+__wsum __csum_partial_copy_user(const void *src, void *dst,
+				int len, __wsum sum, int *err_ptr);
+
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-__wsum csum_partial_copy_from_user(const void __user *src,
-				   void *dst, int len,
-				   __wsum sum, int *errp);
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+				   __wsum sum, int *err_ptr)
+{
+	might_sleep();
+	return __csum_partial_copy_user((__force void *)src, dst,
+					len, sum, err_ptr);
+}
 
 /*
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst,
-					    int len, __wsum sum,
-					    int *err_ptr)
+static inline
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+			     __wsum sum, int *err_ptr)
 {
 	might_sleep();
-	sum = csum_partial(src, len, sum);
-
-	if (copy_to_user(dst, src, len)) {
+	if (access_ok(VERIFY_WRITE, dst, len))
+		return __csum_partial_copy_user(src, (__force void *)dst,
+						len, sum, err_ptr);
+	if (len)
 		*err_ptr = -EFAULT;
-		return (__force __wsum)-1;
-	}
 
-	return sum;
+	return (__force __wsum)-1;	/* invalid checksum */
 }
 
 /*
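
[Editor's aside, not part of the patch: a short illustration of the err_ptr convention the inline wrappers above rely on. The caller function, buffer names, and the validity check are made up for the example.]

	#include <linux/errno.h>
	#include <net/checksum.h>

	/* Hypothetical caller: copy a payload from userspace and verify it. */
	static int example_copy_and_check(const void __user *ubuf, void *kbuf, int len)
	{
		int err = 0;
		__wsum csum = csum_partial_copy_from_user(ubuf, kbuf, len,
							  0, &err);
		if (err)	/* fault: rest of kbuf was zeroed, csum is unusable */
			return -EFAULT;

		/* Folding to 0 is the usual "checksum is valid" condition. */
		return csum_fold(csum) ? -EINVAL : 0;
	}
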
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 67657089efa7..386da82e5774 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -31,14 +31,14 @@ static inline int irq_canonicalize(int irq)
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK()						\
+#define __DO_IRQ_SMTC_HOOK(irq)						\
 do {									\
 	if (irq_hwmask[irq] & 0x0000ff00)				\
 		write_c0_tccontext(read_c0_tccontext() &		\
 				   ~(irq_hwmask[irq] & 0x0000ff00));	\
 } while (0)
 #else
-#define __DO_IRQ_SMTC_HOOK() do { } while (0)
+#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
 #endif
 
 /*
@@ -52,7 +52,7 @@ do {
 #define do_IRQ(irq)							\
 do {									\
 	irq_enter();							\
-	__DO_IRQ_SMTC_HOOK();						\
+	__DO_IRQ_SMTC_HOOK(irq);					\
 	generic_handle_irq(irq);					\
 	irq_exit();							\
 } while (0)
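
[Editor's aside, not part of the patch: a toy illustration of why __DO_IRQ_SMTC_HOOK now takes irq explicitly. The old form only compiled where a local named "irq" happened to be in scope; the parameterised form states that dependency at the expansion site. The macros and helpers below are invented for the example.]

	/* Toy macros, not the kernel ones. */
	static void ack_one(unsigned int irq) { (void)irq; }

	#define OLD_HOOK()	do { ack_one(irq); } while (0)	/* needs a local named "irq" */
	#define NEW_HOOK(irq)	do { ack_one(irq); } while (0)	/* dependency made explicit  */

	static void dispatch(unsigned int hwint)
	{
		/* OLD_HOOK() would not compile here: there is no variable called "irq". */
		NEW_HOOK(hwint);
	}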