aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh64/kernel
stat options
Period:
Authors:

Commits per author per week (path 'arch/sh64/kernel')

AuthorW34 2025W35 2025W36 2025W37 2025Total
Total00000
} /* NOTE(review): closes a definition that begins before this chunk. */

/*
 * Try to grab the swapout protection token.  We only try to
 * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
 * SMP lock contention and to check that the process that held
 * the token before is no longer thrashing.
 *
 * Called with no locks held.  Reads and writes the global token
 * state (swap_token_mm, swap_token_check, swap_token_timeout)
 * under swap_token_lock, but only via a trylock so a page-fault
 * path never blocks here.
 */
void grab_swap_token(void)
{
	struct mm_struct *mm;
	int reason;

	/* We have the token. Let others know we still need it. */
	if (has_swap_token(current->mm)) {
		current->mm->recent_pagein = 1;
		/*
		 * A zero default timeout means the token mechanism has
		 * been switched off; drop the token immediately.
		 */
		if (unlikely(!swap_token_default_timeout))
			disable_swap_token();
		return;
	}

	/*
	 * time_after/time_before are the jiffies-wraparound-safe
	 * comparison macros; do not replace with plain '<' / '>'.
	 */
	if (time_after(jiffies, swap_token_check)) {

		/* Token mechanism disabled: just rearm the check interval. */
		if (!swap_token_default_timeout) {
			swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
			return;
		}

		/* ... or if we recently held the token. */
		if (time_before(jiffies, current->mm->swap_token_time))
			return;

		/* Never spin here; losing the race just means no token now. */
		if (!spin_trylock(&swap_token_lock))
			return;

		swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;

		mm = swap_token_mm;
		if ((reason = should_release_swap_token(mm))) {
			unsigned long eligible = jiffies;
			/*
			 * If the holder merely timed out (rather than
			 * stopped thrashing), make it ineligible to take
			 * the token back for one full timeout period.
			 */
			if (reason == SWAP_TOKEN_TIMED_OUT) {
				eligible += swap_token_default_timeout;
			}
			mm->swap_token_time = eligible;
			swap_token_timeout = jiffies + swap_token_default_timeout;
			/* Hand the token to the current faulting process. */
			swap_token_mm = current->mm;
		}
		spin_unlock(&swap_token_lock);
	}
	return;
}

/*
 * Called on process exit.  Releases the token if @mm holds it,
 * parking it on init_mm and forcing the next grab_swap_token()
 * caller past the interval check (swap_token_check = jiffies).
 */
void __put_swap_token(struct mm_struct *mm)
{
	spin_lock(&swap_token_lock);
	if (likely(mm == swap_token_mm)) {
		mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
		swap_token_mm = &init_mm;
		swap_token_check = jiffies;
	}
	spin_unlock(&swap_token_lock);
}