path: root/include/asm-mn10300
Mode        Name                 Size
-rw-r--r--  .gitignore           10
-rw-r--r--  Kbuild               59
-rw-r--r--  atomic.h             3751
-rw-r--r--  auxvec.h             52
-rw-r--r--  bitops.h             5716
-rw-r--r--  bug.h                862
-rw-r--r--  bugs.h               561
-rw-r--r--  busctl-regs.h        7985
-rw-r--r--  byteorder.h          147
-rw-r--r--  cache.h              2025
-rw-r--r--  cacheflush.h         4330
-rw-r--r--  checksum.h           2256
-rw-r--r--  cpu-regs.h           14601
-rw-r--r--  cputime.h            33
-rw-r--r--  current.h            922
-rw-r--r--  delay.h              597
-rw-r--r--  device.h             32
-rw-r--r--  div64.h              2744
-rw-r--r--  dma-mapping.h        6544
-rw-r--r--  dma.h                3190
-rw-r--r--  dmactl-regs.h        4663
-rw-r--r--  elf.h                4398
-rw-r--r--  emergency-restart.h  43
-rw-r--r--  errno.h              31
-rw-r--r--  exceptions.h         4938
-rw-r--r--  fb.h                 602
-rw-r--r--  fcntl.h              31
-rw-r--r--  fpu.h                2320
-rw-r--r--  frame.inc            2293
-rw-r--r--  futex.h              31
-rw-r--r--  gdb-stub.h           5515
-rw-r--r--  hardirq.h            1711
-rw-r--r--  highmem.h            2854
-rw-r--r--  hw_irq.h             483
-rw-r--r--  ide.h                1101
-rw-r--r--  intctl-regs.h        2427
-rw-r--r--  io.h                 6987
-rw-r--r--  ioctl.h              31
-rw-r--r--  ioctls.h             2765
-rw-r--r--  ipc.h                29
-rw-r--r--  ipcbuf.h             619
-rw-r--r--  irq.h                977
-rw-r--r--  irq_regs.h           683
-rw-r--r--  kdebug.h             561
-rw-r--r--  kmap_types.h         702
-rw-r--r--  kprobes.h            1546
-rw-r--r--  linkage.h            593
-rw-r--r--  local.h              31
-rw-r--r--  mc146818rtc.h        26
-rw-r--r--  mman.h               1057
-rw-r--r--  mmu.h                395
-rw-r--r--  mmu_context.h        3929
-rw-r--r--  module.h             759
-rw-r--r--  msgbuf.h             982
-rw-r--r--  mutex.h              637
-rw-r--r--  nmi.h                456
-rw-r--r--  page.h               3645
-rw-r--r--  page_offset.h        263
-rw-r--r--  param.h              888
-rw-r--r--  pci.h                3369
-rw-r--r--  percpu.h             32
-rw-r--r--  pgalloc.h            1525
-rw-r--r--  pgtable.h            15987
-rw-r--r--  pio-regs.h           7707
-rw-r--r--  poll.h               30
-rw-r--r--  posix_types.h        3600
d---------  proc-mn103e010       137
-rw-r--r--  processor.h          4632
-rw-r--r--  ptrace.h             2645
-rw-r--r--  reset-regs.h         1944
-rw-r--r--  resource.h           34
-rw-r--r--  rtc-regs.h           3648
-rw-r--r--  rtc.h                928
-rw-r--r--  scatterlist.h        1704
-rw-r--r--  sections.h           34
-rw-r--r--  sembuf.h             696
-rw-r--r--  serial-regs.h        7629
-rw-r--r--  serial.h             1078
-rw-r--r--  setup.h              546
-rw-r--r--  shmbuf.h             1163
-rw-r--r--  shmparam.h           143
-rw-r--r--  sigcontext.h         1225
-rw-r--r--  siginfo.h            33
-rw-r--r--  signal.h             3874
-rw-r--r--  smp.h                505
-rw-r--r--  socket.h             1244
-rw-r--r--  sockios.h            340
-rw-r--r--  spinlock.h           525
-rw-r--r--  stat.h               1669
-rw-r--r--  statfs.h             32
-rw-r--r--  string.h             1053
-rw-r--r--  swab.h               986
-rw-r--r--  system.h             5700
-rw-r--r--  termbits.h           4686
-rw-r--r--  termios.h            2654
-rw-r--r--  thread_info.h        4762
-rw-r--r--  timer-regs.h         14716
-rw-r--r--  timex.h              868
-rw-r--r--  tlb.h                941
-rw-r--r--  tlbflush.h           2180
-rw-r--r--  topology.h           34
-rw-r--r--  types.h              864
-rw-r--r--  uaccess.h            13451
-rw-r--r--  ucontext.h           673
-rw-r--r--  unaligned.h          678
-rw-r--r--  unistd.h             11151
d---------  unit-asb2303         178
d---------  unit-asb2305         140
-rw-r--r--  user.h               1958
-rw-r--r--  vga.h                471
-rw-r--r--  xor.h                29

/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
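
/*
 * For orientation, a minimal userspace sequence that exercises this
 * interface through the raw syscalls.  This is an illustrative sketch
 * only: error handling is omitted, and "fd" and "buf" are placeholders
 * the caller must provide.
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	syscall(__NR_io_setup, 128, &ctx);              (sets up a kioctx via ioctx_alloc below)
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	syscall(__NR_io_submit, ctx, 1, cbs);           (queue one read)
 *	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);  (reap its completion)
 *	syscall(__NR_io_destroy, ctx);
 */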
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#if DEBUG > 1
#define dprintk		printk
#else
#define dprintk(x...)	do { ; } while (0)
#endif

/*------ sysctl variables----*/
atomic_t aio_nr = ATOMIC_INIT(0);	/* current system wide number of aio requests */
unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
/*----end sysctl variables---*/
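/*
 * Note: these two counters are what userspace sees as fs.aio-nr and
 * fs.aio-max-nr (/proc/sys/fs/aio-nr, /proc/sys/fs/aio-max-nr); the
 * sysctl table entries themselves are registered in kernel/sysctl.c.
 */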

static kmem_cache_t	*kiocb_cachep;
static kmem_cache_t	*kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(void *);
static DECLARE_WORK(fput_work, aio_fput_routine, NULL);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(void *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 *	Creates the slab caches used by the aio routines; panics on
 *	failure, as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = kmem_cache_create("kiocb", sizeof(struct kiocb),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	kioctx_cachep = kmem_cache_create("kioctx", sizeof(struct kioctx),
				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	aio_wq = create_workqueue("aio");

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

	return 0;
}

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i=0; i<info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		down_write(&ctx->mm->mmap_sem);
		do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
		up_write(&ctx->mm->mmap_sem);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	unsigned long size;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
		memset(info->ring_pages, 0, sizeof(struct page *) * nr_pages);
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&ctx->mm->mmap_sem);
	info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 
				  PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
				  0);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&ctx->mm->mmap_sem);
		printk("mmap err: %ld\n", -info->mmap_base);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	dprintk("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, ctx->mm,
					info->mmap_base, nr_pages, 
					1, 0, info->ring_pages, NULL);
	up_write(&ctx->mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;		/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring, KM_USER0);

	return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic() using the given km slot.  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
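
/*
 * Worked example of the index math (assuming 4096-byte pages, the 32-byte
 * struct aio_ring header and the 32-byte struct io_event of this ABI):
 * AIO_EVENTS_PER_PAGE = 128 and AIO_EVENTS_FIRST_PAGE = 127, so
 * AIO_EVENTS_OFFSET = 1.  Adding the offset shifts event 0 into slot 1 of
 * ring page 0 (just past the header), event 126 fills out page 0, and
 * event 127 starts page 1, after which events pack 128 to a page.
 */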

#define aio_ring_event(info, nr, km) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event, km) do {	\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
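
/*
 * A sketch of how this pair is intended to be used to copy one completed
 * event out of the ring (assumes a valid kioctx *ctx and an index idx the
 * caller has already bounds-checked against the ring):
 *
 *	struct aio_ring_info *info = &ctx->ring_info;
 *	struct io_event *evp = aio_ring_event(info, idx, KM_USER1);
 *	struct io_event ev = *evp;		(copy while the page is mapped)
 *	put_aio_ring_event(evp, KM_USER1);	(kunmap_atomic the ring page)
 */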

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm;
	struct kioctx *ctx;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	memset(ctx, 0, sizeof(*ctx));
	ctx->max_reqs = nr_events;
	mm = ctx->mm = current->mm;
	atomic_inc(&mm->mm_count);

	atomic_set(&ctx->users, 1);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);
	INIT_LIST_HEAD(&ctx->run_list);
	INIT_WORK(&ctx->wq, aio_kick_handler, ctx);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
		goto out_cleanup;

	/* now link into global list.  kludge.  FIXME */
	write_lock(&mm->ioctx_list_lock);
	ctx->next = mm->ioctx_list;
	mm->ioctx_list = ctx;
	write_unlock(&mm->ioctx_list_lock);

	dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	atomic_sub(ctx->max_reqs, &aio_nr);
	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
	__put_ioctx(ctx);
	return ERR_PTR(-EAGAIN);

out_freectx:
	mmdrop(mm);
	kmem_cache_free(kioctx_cachep, ctx);
	ctx = ERR_PTR(-ENOMEM);

	dprintk("aio: error allocating ioctx %p\n", ctx);
	return ctx;
}

/* aio_cancel_all
 *	Cancels all outstanding aio requests on an aio context.  Used 
 *	when the processes owning a context have all exited to encourage 
 *	the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	struct io_event res;
	spin_lock_irq(&ctx->ctx_lock);
	ctx->dead = 1;
	while (!list_empty(&ctx->active_reqs)) {
		struct list_head *pos = ctx->active_reqs.next;
		struct kiocb *iocb = list_kiocb(pos);
		list_del_init(&iocb->ki_list);
		cancel = iocb->ki_cancel;
		kiocbSetCancelled(iocb);
		if (cancel) {
			iocb->ki_users++;
			spin_unlock_irq(&ctx->ctx_lock);
			cancel(iocb, &res);
			spin_lock_irq(&ctx->ctx_lock);
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	if (!ctx->reqs_active)
		return;

	add_wait_queue(&ctx->wait, &wait);
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (ctx->reqs_active) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(&ctx->wait, &wait);
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (iocb->ki_users) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!iocb->ki_users)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}

/* exit_aio: called when the last user of mm goes away.  At this point, 
 * there is no way for any new requests to be submitted or any of the 
 * io_* syscalls to be called on the context.  However, there may be 
 * outstanding requests which hold references to the context; as they 
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void fastcall exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx = mm->ioctx_list;
	mm->ioctx_list = NULL;
	while (ctx) {
		struct kioctx *next = ctx->next;
		ctx->next = NULL;
		aio_cancel_all(ctx);

		wait_for_all_aios(ctx);
		/*
		 * this is overkill, but it ensures we don't leave
		 * the ctx on the aio_wq
		 */
		flush_workqueue(aio_wq);

		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users), ctx->dead,
				ctx->reqs_active);
		put_ioctx(ctx);
		ctx = next;
	}
}

/* __put_ioctx
 *	Called when the last user of an aio context has gone away,
 *	and the struct needs to be freed.
 */
void fastcall __put_ioctx(struct kioctx *ctx)
{
	unsigned nr_events = ctx->max_reqs;

	if (unlikely(ctx->reqs_active))
		BUG();

	cancel_delayed_work(&ctx->wq);
	flush_workqueue(aio_wq);
	aio_free_ring(ctx);
	mmdrop(ctx->mm);
	ctx->mm = NULL;
	pr_debug("__put_ioctx: freeing %p\n", ctx);
	kmem_cache_free(kioctx_cachep, ctx);

	atomic_sub(nr_events, &aio_nr);
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete.  Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2.  The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;
	struct aio_ring *ring;
	int okay = 0;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 1 << KIF_LOCKED;
	req->ki_users = 2;
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	INIT_LIST_HEAD(&req->ki_run_list);

	/* Check if the completion queue has enough free space to
	 * accept an event from this io.
	 */
	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
		list_add(&req->ki_list, &ctx->active_reqs);
		get_ioctx(ctx);
		ctx->reqs_active++;
		okay = 1;
	}
	kunmap_atomic(ring, KM_USER0);
	spin_unlock_irq(&ctx->ctx_lock);

	if (!okay) {
		kmem_cache_free(kiocb_cachep, req);
		req = NULL;
	}

	return req;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;
	/* Handle a potential starvation case -- should be exceedingly rare as 
	 * requests will be stuck on fput_head only if the aio_fput_routine is 
	 * delayed and the requests were the last user of the struct file.
	 */
	req = __aio_get_req(ctx);
	if (unlikely(NULL == req)) {
		aio_fput_routine(NULL);
		req = __aio_get_req(ctx);
	}
	return req;
}
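
/*
 * The request returned here carries the extra reference described above;
 * the submit path (io_submit_one(), later in this file) drops that extra
 * reference with aio_put_req() once it has finished touching the iocb,
 * and the i/o reference is dropped when the request completes.
 */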

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	if (req->ki_dtor)
		req->ki_dtor(req);
	kmem_cache_free(kiocb_cachep, req);
	ctx->reqs_active--;

	if (unlikely(!ctx->reqs_active && ctx->dead))
		wake_up(&ctx->wait);
}

static void aio_fput_routine(void *data)
{
	spin_lock_irq(&fput_lock);
	while (likely(!list_empty(&fput_head))) {
		struct kiocb *req = list_kiocb(fput_head.next);
		struct kioctx *ctx = req->ki_ctx;

		list_del(&req->ki_list);
		spin_unlock_irq(&fput_lock);

		/* Complete the fput */
		__fput(req->ki_filp);

		/* Link the iocb into the context's free list */
		spin_lock_irq(&ctx->ctx_lock);
		really_put_req(ctx, req);
		spin_unlock_irq(&ctx->ctx_lock);

		put_ioctx(ctx);
		spin_lock_irq(&fput_lock);
	}
	spin_unlock_irq(&fput_lock);
}

/* __aio_put_req