aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/ieee1394/dma.h
blob: 061550a6fb9941669655e5eb68463b01c089ac5d (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#ifndef IEEE1394_DMA_H
#define IEEE1394_DMA_H

#include <linux/pci.h>
#include <asm/scatterlist.h>

/* struct dma_prog_region

   a small, physically-contiguous DMA buffer with random-access,
   synchronous usage characteristics

   Because the buffer is physically contiguous, a bus address for any
   byte can be computed as base + offset (see
   dma_prog_region_offset_to_bus() below).  Intended for DMA program/
   descriptor storage rather than bulk payload data.
*/

struct dma_prog_region {
	unsigned char    *kvirt;     /* kernel virtual address of the buffer */
	struct pci_dev   *dev;       /* PCI device that owns the mapping */
	unsigned int      n_pages;   /* buffer size in whole kernel pages */
	dma_addr_t        bus_addr;  /* bus address of the first byte */
};

/* clear out all fields but do not allocate any memory */
void dma_prog_region_init(struct dma_prog_region *prog);
/* allocate a physically-contiguous buffer of n_bytes (presumably rounded
   up to whole pages — see n_pages above; confirm in dma.c) for device dev;
   returns 0 on success, nonzero on failure */
int  dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
/* release the buffer; safe to call on a region that was only init'ed */
void dma_prog_region_free(struct dma_prog_region *prog);

static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
{
	return prog->bus_addr + offset;
}

/* struct dma_region

   a large, non-physically-contiguous DMA buffer with streaming,
   asynchronous usage characteristics
*/

struct dma_region {
	unsigned char      *kvirt;       /* kernel virtual address */
	struct pci_dev     *dev;         /* PCI device */
	unsigned int        n_pages;     /* # of kernel pages */
	unsigned int        n_dma_pages; /* # of IOMMU pages */
	struct scatterlist *sglist;      /* IOMMU mapping */
	int                 direction;   /* PCI_DMA_TODEVICE, etc */
};

/* clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma);

/* allocate the buffer and map it to the IOMMU; direction is one of the
   PCI_DMA_* values stored in struct dma_region.direction.  Returns 0 on
   success, nonzero on failure (convention — confirm in dma.c) */
int  dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);

/* unmap and free the buffer */
void dma_region_free(struct dma_region *dma);

/* sync the CPU's view of the buffer for the byte range
   [offset, offset+len) before the CPU reads data the device wrote */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len);
/* sync the IO bus' view of the buffer for the byte range
   [offset, offset+len) before the device reads data the CPU wrote */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len);

/* map the buffer into a user space process (mmap file-operation helper);
   returns 0 on success, negative errno on failure — confirm in dma.c */
int  dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);

/* macro to index into a DMA region (or dma_prog_region): treats kvirt as
   an array of _type and yields a pointer to element _index.  Each
   argument is expanded exactly once, and the expansion is fully
   parenthesized.  No bounds checking is performed. */
#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )

/* return the DMA bus address of the byte with the given offset
   relative to the beginning of the dma_region.  Unlike the
   dma_prog_region case this requires a scatter-gather lookup, hence an
   out-of-line function rather than a static inline. */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);

#endif /* IEEE1394_DMA_H */
/span> case 0x206: boot_cpu_data.type = CPU_SH7750S; boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | CPU_HAS_PERF_COUNTER; break; case 0x1100: boot_cpu_data.type = CPU_SH7751; boot_cpu_data.flags |= CPU_HAS_FPU; break; case 0x2001: case 0x2004: boot_cpu_data.type = CPU_SH7770; boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_LLSC; break; case 0x2006: case 0x200A: if (prr == 0x61) boot_cpu_data.type = CPU_SH7781; else if (prr == 0xa1) boot_cpu_data.type = CPU_SH7763; else boot_cpu_data.type = CPU_SH7780; boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | CPU_HAS_LLSC; break; case 0x3000: case 0x3003: case 0x3009: boot_cpu_data.type = CPU_SH7343; boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_LLSC; break; case 0x3004: case 0x3007: boot_cpu_data.type = CPU_SH7785; boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | CPU_HAS_LLSC; break; case 0x3008: boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_LLSC; switch (prr) { case 0x50: boot_cpu_data.type = CPU_SH7723; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_L2_CACHE; break; case 0x70: boot_cpu_data.type = CPU_SH7366; break; case 0xa0: case 0xa1: boot_cpu_data.type = CPU_SH7722; break; } break; case 0x4000: /* 1st cut */ case 0x4001: /* 2nd cut */ boot_cpu_data.type = CPU_SHX3; boot_cpu_data.icache.ways = 4; boot_cpu_data.dcache.ways = 4; boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | CPU_HAS_LLSC; break; case 0x700: boot_cpu_data.type = CPU_SH4_501; boot_cpu_data.icache.ways = 2; boot_cpu_data.dcache.ways = 2; break; case 0x600: boot_cpu_data.type = CPU_SH4_202; boot_cpu_data.icache.ways = 2; boot_cpu_data.dcache.ways = 2; boot_cpu_data.flags |= CPU_HAS_FPU; break; case 0x500 ... 
0x501: switch (prr) { case 0x10: boot_cpu_data.type = CPU_SH7750R; break; case 0x11: boot_cpu_data.type = CPU_SH7751R; break; case 0x50 ... 0x5f: boot_cpu_data.type = CPU_SH7760; break; } boot_cpu_data.icache.ways = 2; boot_cpu_data.dcache.ways = 2; boot_cpu_data.flags |= CPU_HAS_FPU; break; default: boot_cpu_data.type = CPU_SH_NONE; break; } #ifdef CONFIG_SH_DIRECT_MAPPED boot_cpu_data.icache.ways = 1; boot_cpu_data.dcache.ways = 1; #endif #ifdef CONFIG_CPU_HAS_PTEA boot_cpu_data.flags |= CPU_HAS_PTEA; #endif /* * On anything that's not a direct-mapped cache, look to the CVR * for I/D-cache specifics. */ if (boot_cpu_data.icache.ways > 1) { size = sizes[(cvr >> 20) & 0xf]; boot_cpu_data.icache.way_incr = (size >> 1); boot_cpu_data.icache.sets = (size >> 6); } /* And the rest of the D-cache */ if (boot_cpu_data.dcache.ways > 1) { size = sizes[(cvr >> 16) & 0xf]; boot_cpu_data.dcache.way_incr = (size >> 1); boot_cpu_data.dcache.sets = (size >> 6); } /* * Setup the L2 cache desc * * SH-4A's have an optional PIPT L2. */ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { /* * Size calculation is much more sensible * than it is for the L1. * * Sizes are 128KB, 258KB, 512KB, and 1MB. */ size = (cvr & 0xf) << 17; BUG_ON(!size); boot_cpu_data.scache.way_incr = (1 << 16); boot_cpu_data.scache.entry_shift = 5; boot_cpu_data.scache.ways = 4; boot_cpu_data.scache.linesz = L1_CACHE_BYTES; boot_cpu_data.scache.entry_mask = (boot_cpu_data.scache.way_incr - boot_cpu_data.scache.linesz); boot_cpu_data.scache.sets = size / (boot_cpu_data.scache.linesz * boot_cpu_data.scache.ways); boot_cpu_data.scache.way_size = (boot_cpu_data.scache.sets * boot_cpu_data.scache.linesz); } return 0; }