Diffstat (limited to 'drivers/misc/genwqe/card_base.h')
-rw-r--r-- | drivers/misc/genwqe/card_base.h | 557
1 file changed, 557 insertions, 0 deletions
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
new file mode 100644
index 000000000000..5e4dbd21f89a
--- /dev/null
+++ b/drivers/misc/genwqe/card_base.h
@@ -0,0 +1,557 @@
#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@de.ibm.com>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_card and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"

#define GENWQE_MSI_IRQS			4  /* Just one supported, no MSIx */
#define GENWQE_FLAG_MSI_ENABLED		(1 << 0)

#define GENWQE_MAX_VFS			15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS		16 /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX		(16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define genwqe_ddcb_max			32   /* DDCBs on the work-queue */
#define genwqe_polling_enabled		0    /* in case of irqs not working */
#define genwqe_ddcb_software_timeout	10   /* timeout per DDCB in seconds */
#define genwqe_kill_timeout		8    /* time until process gets killed */
#define genwqe_vf_jobtimeout_msec	250  /* 250 msec */
#define genwqe_pf_jobtimeout_msec	8000 /* 8 sec should be ok */
#define genwqe_health_check_interval	4    /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE		0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5	0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW	0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5		0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV	0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV	0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV	0x1200 /* UNKNOWN */

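/*
 * A minimal sketch, for illustration only, of how these IDs would
 * typically be matched in the corresponding .c file. The table name
 * and the use of PCI_VENDOR_ID_IBM as subvendor are assumptions here,
 * not taken from this header:
 *
 *	static const struct pci_device_id genwqe_ids[] = {
 *		{ .vendor     = PCI_VENDOR_ID_IBM,
 *		  .device     = PCI_DEVICE_GENWQE,
 *		  .subvendor  = PCI_VENDOR_ID_IBM,
 *		  .subdevice  = PCI_SUBSYSTEM_ID_GENWQE5,
 *		  .class      = (PCI_CLASSCODE_GENWQE5 << 8),
 *		  .class_mask = ~0, },
 *		{ 0, }
 *	};
 *	MODULE_DEVICE_TABLE(pci, genwqe_ids);
 */
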
#define GENWQE_SLU_ARCH_REQ		2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS  = 8,
	GENWQE_DBG_DMA   = 9,
	GENWQE_DBG_UNITS = 10, /* max number of possible debug units */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE	0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL	0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO		0x00000008 /* GFIR = 0xffff0000 */
/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as doing an unbind/bind operation
 * on the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If it detects a problem, the card's
 * device is shut down and restarted, along with a reset of the card
 * and queue.
 *
 * All functions accessing the card device return either -EIO or -ENODEV
 * to indicate the malfunction to the user. The user has to close the
 * file descriptor and open a new one once the card becomes available
 * again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal
 * is generated for the application, which has to provide a handler to
 * react to it. If the application does not close the open file
 * descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */


/**
 * struct dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around, we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,		/* contiguous memory buffer */
	GENWQE_MAPPING_SGL_TEMP,	/* sglist dynamically used */
	GENWQE_MAPPING_SGL_PINNED,	/* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;			/* user-space vaddr/non-aligned */
	void *k_vaddr;			/* kernel-space vaddr/non-aligned */
	dma_addr_t dma_addr;		/* physical DMA address */

	struct page **page_list;	/* list of pages used by user buff */
	dma_addr_t *dma_list;		/* list of dma addresses per page */
	unsigned int nr_pages;		/* number of pages */
	unsigned int size;		/* size in bytes */

	struct list_head card_list;	/* list of usr_maps for card */
	struct list_head pin_list;	/* list of pinned memory for dev */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
				       enum dma_mapping_type type)
{
	memset(m, 0, sizeof(*m));
	m->type = type;
}
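
/*
 * A minimal usage sketch (illustrative only, error handling
 * abbreviated; uaddr and size are caller-supplied), combining this
 * initializer with the pinning helpers declared later in this header:
 *
 *	struct dma_mapping *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *
 *	genwqe_mapping_init(m, GENWQE_MAPPING_SGL_PINNED);
 *	rc = genwqe_user_vmap(cd, m, uaddr, size, NULL);
 *	...
 *	rc = genwqe_user_vunmap(cd, m, NULL);
 *	kfree(m);
 */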

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max: Number of DDCBs on the queue
 * @ddcb_next: Next free DDCB
 * @ddcb_act: Next DDCB supposed to finish
 * @ddcb_seq: Sequence number of last DDCB
 * @ddcbs_in_flight: Currently enqueued DDCBs
 * @ddcbs_completed: Number of already completed DDCBs
 * @busy: Number of -EBUSY returns
 * @ddcb_daddr: DMA address of first DDCB in the queue
 * @ddcb_vaddr: Kernel virtual address of first DDCB in the queue
 * @ddcb_req: Associated requests (one per DDCB)
 * @ddcb_waitqs: Associated wait queues (one per DDCB)
 * @ddcb_lock: Lock to protect queuing operations
 * @ddcb_waitq: Wait on next DDCB finishing
 */
struct ddcb_queue {
	int ddcb_max;			/* amount of DDCBs */
	int ddcb_next;			/* next available DDCB num */
	int ddcb_act;			/* DDCB to be processed */
	u16 ddcb_seq;			/* slc seq num */
	unsigned int ddcbs_in_flight;	/* number of ddcbs in processing */
	unsigned int ddcbs_completed;
	unsigned int ddcbs_max_in_flight;
	unsigned int busy;		/* how many times -EBUSY? */

	dma_addr_t ddcb_daddr;		/* DMA address */
	struct ddcb *ddcb_vaddr;	/* kernel virtual addr for DDCBs */
	struct ddcb_requ **ddcb_req;	/* ddcb processing parameter */
	wait_queue_head_t *ddcb_waitqs;	/* waitqueue per ddcb */

	spinlock_t ddcb_lock;		/* exclusive access to queue */
	wait_queue_head_t ddcb_waitq;	/* wait for ddcb processing */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
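
/*
 * A sketch of the wrap arithmetic implied by ddcb_next/ddcb_max
 * (illustrative only; the queue code additionally has to check the
 * in-flight count under ddcb_lock before handing out a DDCB):
 *
 *	queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;
 */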

/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG
 * 8 units with FIR/FEC + 64 secondary FIRs/FECs each.
 */
#define GENWQE_FFDC_REGS	(3 + (8 * (2 + 2 * 64)))
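
/* i.e. 3 + 8 * (2 + 128) = 1043 register entries at most */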

struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state: Card operation state, see above
 * @ffdc: First Failure Data Capture buffers for each unit
 * @card_thread: Working thread to operate the DDCB queue
 * @queue_waitq: Wait queue used in card_thread
 * @queue: DDCB queue
 * @health_thread: Card monitoring thread (only for PFs)
 * @health_waitq: Wait queue used in health_thread
 * @pci_dev: Associated PCI device (function)
 * @mmio: Base address of 64-bit register space
 * @mmio_len: Length of register area
 * @file_lock: Lock to protect access to file_list
 * @file_list: List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;

	int card_idx;			/* card index 0..CARD_NO_MAX-1 */
	u64 flags;			/* general flags */

	/* FFDC data gathering */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB workqueue */
	struct task_struct *card_thread;
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* Card health checking thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	/* char device */
	dev_t devnum_genwqe;		/* major/minor num card */
	struct class *class_genwqe;	/* reference to class object */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	u16 num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* access to all regs possible */

	/* config regs which we need often */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;
	char app_name[5];

	spinlock_t file_lock;		/* lock for open files */
	struct list_head file_list;	/* list of open files */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* wait until DDCB times out */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW      = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED   = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd: User space representation of the DDCB execution request
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state; /* request status */
	int num;			  /* ddcb_no for this request */
	struct ddcb_queue *queue;	  /* associated queue */

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct sg_entry *sgl[DDCB_FIXUPS];
	dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
	size_t sgl_size[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;	  /* user space command */
	struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct task_struct *owner;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};

int genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int genwqe_finish_queue(struct genwqe_dev *cd);
int genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
	return (int)((cd->slu_unitcfg >> 32) & 0xff);
}

int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8 genwqe_card_type(struct genwqe_dev *cd);
int genwqe_card_reset(struct genwqe_dev *cd);
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int genwqe_device_create(struct genwqe_dev *cd);
int genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
int genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @val:  value to write
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd:   genwqe device
 * @reg:  register address
 * @func: 0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
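
/*
 * A minimal read-modify-write sketch (illustrative only; IO_SOME_REG
 * and SOME_BIT are hypothetical placeholders). Note that VF n maps to
 * func n + 1, as documented above:
 *
 *	u64 val = genwqe_read_vreg(cd, IO_SOME_REG, vf_id + 1);
 *
 *	genwqe_write_vreg(cd, IO_SOME_REG, val | SOME_BIT, vf_id + 1);
 */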

/* FFDC Buffer Management */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
			  struct genwqe_reg *regs, unsigned int max_regs);
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all);
int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
			 struct genwqe_reg *regs, unsigned int max_regs);

int genwqe_init_debug_data(struct genwqe_dev *cd,
			   struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
		     void *uaddr, unsigned long size,
		     struct ddcb_requ *req);

int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
		       struct ddcb_requ *req);

struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
				  dma_addr_t *dma_addr, size_t *sgl_size);

void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
		     dma_addr_t dma_addr, size_t size);

int genwqe_setup_sgl(struct genwqe_dev *cd,
		     unsigned long offs,
		     unsigned long size,
		     struct sg_entry *sgl, /* genwqe sgl */
		     dma_addr_t dma_addr, size_t sgl_size,
		     dma_addr_t *dma_list, int page_offs, int num_pages);

int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
		     int size);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
	if (!m)
		return false;
	return m->size != 0;
}

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function will do the address translation changes to the DDCBs
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter-gather list
 * buildup and teardown.
 */
int __genwqe_execute_ddcb(struct genwqe_dev *cd,
			  struct genwqe_ddcb_cmd *cmd);
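
/*
 * A minimal caller sketch, assuming the public struct genwqe_ddcb_cmd
 * layout from linux/genwqe/genwqe_card.h; the command code 0x50 and
 * the unit number are hypothetical placeholders (illustrative only,
 * no error handling):
 *
 *	struct genwqe_ddcb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.cmd = 0x50;		hypothetical application command
 *	cmd.acfunc = 0;		unit to address
 *	rc = __genwqe_execute_ddcb(cd, &cmd);
 */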

/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
			      struct genwqe_ddcb_cmd *cmd);

int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
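
/*
 * Reads return IO_ILLEGAL_VALUE (all bits set) once the card is gone;
 * a minimal sketch of the resulting liveness check, using the names
 * from the genwqe_is_privileged() comment below:
 *
 *	if (__genwqe_readq(cd, IO_SLU_BITSTREAM) == IO_ILLEGAL_VALUE)
 *		return -EIO;
 */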

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle);

/* Base clock frequency in MHz */
int genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On systems with no SRIOV support _and_ on virtualized systems we get:
 *   is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual PCI device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one PCI device id for both, VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access e.g. currently on PowerPC:
 *
 *   if (pci_dev->is_virtfn)
 *           cd->is_privileged = 0;
 *   else
 *           cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                                != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}

#endif	/* __CARD_BASE_H__ */