path: root/drivers/isdn/i4l/isdn_net.c
author     David Howells <dhowells@redhat.com>  2006-11-22 09:57:56 -0500
committer  David Howells <dhowells@redhat.com>  2006-11-22 09:57:56 -0500
commit     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree       1c4c89652c62a75da09f9b9442012007e4ac6250 /drivers/isdn/i4l/isdn_net.c
parent     65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
WorkStruct: make allyesconfig
Fix up for make allyesconfig.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/isdn/i4l/isdn_net.c')
-rw-r--r--  drivers/isdn/i4l/isdn_net.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f8d6ae66b41..2e4daebfb7e0 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
 /*
  * called from tq_immediate
  */
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
 {
-	isdn_net_local *lp = private;
+	isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
 	struct sk_buff *skb;
 
 	spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master)
 	netdev->local->netdev = netdev;
 	netdev->local->next = netdev->local;
 
-	INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+	INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
 	spin_lock_init(&netdev->local->xmit_lock);
 
 	netdev->local->isdn_device = -1;
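For context, the change above follows the tree-wide workqueue conversion in which a work handler receives the struct work_struct itself rather than an opaque void * and recovers its containing object with container_of(). The fragment below is a minimal sketch of that calling convention, not code from this patch; struct my_dev, my_dev_workfn() and my_dev_init() are made-up names used only for illustration.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical driver-private object embedding a work item, in the
 * same way isdn_net_local embeds its tqueue member. */
struct my_dev {
	struct work_struct work;
	int pending;
};

/* New-style handler: it is passed the work_struct and uses
 * container_of() to get back to the enclosing structure. */
static void my_dev_workfn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	/* INIT_WORK() no longer takes a separate data argument; the
	 * handler derives its context from the work item instead. */
	INIT_WORK(&dev->work, my_dev_workfn);
	dev->pending = 1;
}

Queueing is unchanged (schedule_work(&dev->work)); only the handler signature and the INIT_WORK() arguments differ from the older API that this fix-up adapts isdn_net.c to.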