/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H

#include <linux/cache.h>
#include <asm/percpu.h>
#include <asm/system.h>
#include <asm/sn/bte.h>
/*
* CPU-specific data structure.
*
* One of these structures is allocated for each cpu of a NUMA system.
*
* This structure provides a convenient way of keeping together
* all SN per-cpu data structures.
*/
typedef struct pda_s {
	/*
	 * Having a pointer at the beginning of the PDA tends to increase
	 * the chance of having this pointer in cache. (Yes, something
	 * else gets pushed out.) Doing this reduces the number of memory
	 * accesses needed to reach any nodepda variable to one.
	 */
	struct nodepda_s	*p_nodepda;	/* Pointer to per-node PDA */
	struct subnodepda_s	*p_subnodepda;	/* Pointer to CPU subnode PDA */

	/*
	 * Support for SN LEDs
	 */
	volatile short		*led_address;
	u8			led_state;
	u8			hb_state;	/* supports blinking heartbeat leds */
	unsigned int		hb_count;

	unsigned int		idle_flag;

	volatile unsigned long	*bedrock_rev_id;
	volatile unsigned long	*pio_write_status_addr;
	unsigned long		pio_write_status_val;
	volatile unsigned long	*pio_shub_war_cam_addr;

	unsigned long		sn_soft_irr[4];
	unsigned long		sn_in_service_ivecs[4];
	short			cnodeid_to_nasid_table[MAX_NUMNODES];
	int			sn_lb_int_war_ticks;
	int			sn_last_irq;
	int			sn_first_irq;
} pda_t;
#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
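/*
 * Illustrative examples (not part of the original header): CACHE_ALIGN()
 * rounds its argument up to the next SMP_CACHE_BYTES boundary. Assuming
 * SMP_CACHE_BYTES == 128:
 *
 *	CACHE_ALIGN(200) == 256
 *	CACHE_ALIGN(128) == 128
 */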
/*
 * PDA
 * Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for
 * each cpu, but only a small amount of the page is actually used. We put the
 * SNIA PDA in the same page as the cpu_data area. Note that there is a check
 * in the setup code to verify that we don't overflow the page.
 *
 * Seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change the cache layout. Should we align to
 * a 32, 64, 128, or 512 byte boundary? Each has merits. For now, pick 128, but
 * this should be revisited later.
 */
DECLARE_PER_CPU(struct pda_s, pda_percpu);
#define pda (&__ia64_per_cpu_var(pda_percpu))
#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
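/*
 * Usage sketch (illustrative, not part of the original header): "pda"
 * resolves to the PDA of the currently executing CPU, while pdacpu(cpu)
 * reaches the PDA of an arbitrary CPU "cpu", e.g.:
 *
 *	struct nodepda_s *my_npda = pda->p_nodepda;
 *	int first_irq = pdacpu(cpu)->sn_first_irq;
 */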
#endif /* _ASM_IA64_SN_PDA_H */