/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
 * sunqe.h: Definitions for the Sun QuadEthernet driver.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SUNQE_H
#define _SUNQE_H

/* QEC global registers. */
#define GLOB_CTRL	0x00UL		/* Control			*/
#define GLOB_STAT	0x04UL		/* Status			*/
#define GLOB_PSIZE	0x08UL		/* Packet Size			*/
#define GLOB_MSIZE	0x0cUL		/* Local-memory Size		*/
#define GLOB_RSIZE	0x10UL		/* Receive partition size	*/
#define GLOB_TSIZE	0x14UL		/* Transmit partition size	*/
#define GLOB_REG_SIZE	0x18UL

#define GLOB_CTRL_MMODE       0x40000000 /* MACE qec mode            */
#define GLOB_CTRL_BMODE       0x10000000 /* BigMAC qec mode          */
#define GLOB_CTRL_EPAR        0x00000020 /* Enable parity            */
#define GLOB_CTRL_ACNTRL      0x00000018 /* SBUS arbitration control */
#define GLOB_CTRL_B64         0x00000004 /* 64 byte dvma bursts      */
#define GLOB_CTRL_B32         0x00000002 /* 32 byte dvma bursts      */
#define GLOB_CTRL_B16         0x00000000 /* 16 byte dvma bursts      */
#define GLOB_CTRL_RESET       0x00000001 /* Reset the QEC            */

#define GLOB_STAT_TX          0x00000008 /* BigMAC Transmit IRQ      */
#define GLOB_STAT_RX          0x00000004 /* BigMAC Receive IRQ       */
#define GLOB_STAT_BM          0x00000002 /* BigMAC Global IRQ        */
#define GLOB_STAT_ER          0x00000001 /* BigMAC Error IRQ         */

#define GLOB_PSIZE_2048       0x00       /* 2k packet size           */
#define GLOB_PSIZE_4096       0x01       /* 4k packet size           */
#define GLOB_PSIZE_6144       0x10       /* 6k packet size           */
#define GLOB_PSIZE_8192       0x11       /* 8k packet size           */

/* In MACE mode, there are four qe channels.  Each channel has its own
 * status bits in the QEC status register.  This macro picks out the
 * ones you want.
 */
#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
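
/* An illustrative sketch (not part of the original header) of how an
 * interrupt handler might consume this: read the global status once,
 * then peel off each channel's nibble.  sbus_readl() is the SBUS MMIO
 * accessor this driver family uses; qecp points at the struct sunqec
 * defined later in this file, and qec_service_channel() is a
 * hypothetical stand-in for the per-channel IRQ work.
 *
 *	u32 qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
 *	int channel;
 *
 *	for (channel = 0; channel < 4; channel++) {
 *		if (GLOB_STAT_PER_QE(qec_status, channel))
 *			qec_service_channel(qecp->qes[channel]);
 *	}
 */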

/* The following registers are for per-qe channel information/status. */
#define CREG_CTRL	0x00UL	/* Control                   */
#define CREG_STAT	0x04UL	/* Status                    */
#define CREG_RXDS	0x08UL	/* RX descriptor ring ptr    */
#define CREG_TXDS	0x0cUL	/* TX descriptor ring ptr    */
#define CREG_RIMASK	0x10UL	/* RX Interrupt Mask         */
#define CREG_TIMASK	0x14UL	/* TX Interrupt Mask         */
#define CREG_QMASK	0x18UL	/* QEC Error Interrupt Mask  */
#define CREG_MMASK	0x1cUL	/* MACE Error Interrupt Mask */
#define CREG_RXWBUFPTR	0x20UL	/* Local memory rx write ptr */
#define CREG_RXRBUFPTR	0x24UL	/* Local memory rx read ptr  */
#define CREG_TXWBUFPTR	0x28UL	/* Local memory tx write ptr */
#define CREG_TXRBUFPTR	0x2cUL	/* Local memory tx read ptr  */
#define CREG_CCNT	0x30UL	/* Collision Counter         */
#define CREG_PIPG	0x34UL	/* Inter-Frame Gap           */
#define CREG_REG_SIZE	0x38UL

#define CREG_CTRL_RXOFF       0x00000004  /* Disable this qe's receiver */
#define CREG_CTRL_RESET       0x00000002  /* Reset this qe channel     */
#define CREG_CTRL_TWAKEUP     0x00000001  /* Transmitter Wakeup, 'go'. */

#define CREG_STAT_EDEFER      0x10000000  /* Excessive Defers          */
#define CREG_STAT_CLOSS       0x08000000  /* Carrier Loss              */
#define CREG_STAT_ERETRIES    0x04000000  /* More than 16 retries      */
#define CREG_STAT_LCOLL       0x02000000  /* Late TX Collision         */
#define CREG_STAT_FUFLOW      0x01000000  /* FIFO Underflow            */
#define CREG_STAT_JERROR      0x00800000  /* Jabber Error              */
#define CREG_STAT_BERROR      0x00400000  /* Babble Error              */
#define CREG_STAT_TXIRQ       0x00200000  /* Transmit Interrupt        */
#define CREG_STAT_CCOFLOW     0x00100000  /* TX Coll-counter Overflow  */
#define CREG_STAT_TXDERROR    0x00080000  /* TX Descriptor is bogus    */
#define CREG_STAT_TXLERR      0x00040000  /* Late Transmit Error       */
#define CREG_STAT_TXPERR      0x00020000  /* Transmit Parity Error     */
#define CREG_STAT_TXSERR      0x00010000  /* Transmit SBUS error ack   */
#define CREG_STAT_RCCOFLOW    0x00001000  /* RX Coll-counter Overflow  */
#define CREG_STAT_RUOFLOW     0x00000800  /* Runt Counter Overflow     */
#define CREG_STAT_MCOFLOW     0x00000400  /* Missed Counter Overflow   */
#define CREG_STAT_RXFOFLOW    0x00000200  /* RX FIFO Overflow          */
#define CREG_STAT_RLCOLL      0x00000100  /* RX Late Collision         */
#define CREG_STAT_FCOFLOW     0x00000080  /* Frame Counter Overflow    */
#define CREG_STAT_CECOFLOW    0x00000040  /* CRC Error-counter Overflow*/
#define CREG_STAT_RXIRQ       0x00000020  /* Receive Interrupt         */
#define CREG_STAT_RXDROP      0x00000010  /* Dropped a RX'd packet     */
#define CREG_STAT_RXSMALL     0x00000008  /* Receive buffer too small  */
#define CREG_STAT_RXLERR      0x00000004  /* Receive Late Error        */
#define CREG_STAT_RXPERR      0x00000002  /* Receive Parity Error      */
#define CREG_STAT_RXSERR      0x00000001  /* Receive SBUS Error ACK    */

#define CREG_STAT_ERRORS      (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES|     \
			       CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR|       \
			       CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR|   \
			       CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR|      \
			       CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
			       CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW|   \
			       CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL|   \
			       CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)

#define CREG_QMASK_COFLOW     0x00100000  /* CollCntr overflow         */
#define CREG_QMASK_TXDERROR   0x00080000  /* TXD error                 */
#define CREG_QMASK_TXLERR     0x00040000  /* TX late error             */
#define CREG_QMASK_TXPERR     0x00020000  /* TX parity error           */
#define CREG_QMASK_TXSERR     0x00010000  /* TX sbus error ack         */
#define CREG_QMASK_RXDROP     0x00000010  /* RX drop                   */
#define CREG_QMASK_RXBERROR   0x00000008  /* RX buffer error           */
#define CREG_QMASK_RXLEERR    0x00000004  /* RX late error             */
#define CREG_QMASK_RXPERR     0x00000002  /* RX parity error           */
#define CREG_QMASK_RXSERR     0x00000001  /* RX sbus error ack         */

#define CREG_MMASK_EDEFER     0x10000000  /* Excess defer              */
#define CREG_MMASK_CLOSS      0x08000000  /* Carrier loss              */
#define CREG_MMASK_ERETRY     0x04000000  /* Excess retry              */
#define CREG_MMASK_LCOLL      0x02000000  /* Late collision error      */
#define CREG_MMASK_UFLOW      0x01000000  /* Underflow                 */
#define CREG_MMASK_JABBER     0x00800000  /* Jabber error              */
#define CREG_MMASK_BABBLE     0x00400000  /* Babble error              */
#define CREG_MMASK_OFLOW      0x00000800  /* Overflow                  */
#define CREG_MMASK_RXCOLL     0x00000400  /* RX Coll-Cntr overflow     */
#define CREG_MMASK_RPKT       0x00000200  /* Runt pkt overflow         */
#define CREG_MMASK_MPKT       0x00000100  /* Missed pkt overflow       */

#define CREG_PIPG_TENAB       0x00000020  /* Enable Throttle           */
#define CREG_PIPG_MMODE       0x00000010  /* Manual Mode               */
#define CREG_PIPG_WMASK       0x0000000f  /* SBUS Wait Mask            */

/* Per-channel AMD 79C940 MACE registers. */
#define MREGS_RXFIFO	0x00UL	/* Receive FIFO                   */
#define MREGS_TXFIFO	0x01UL	/* Transmit FIFO                  */
#define MREGS_TXFCNTL	0x02UL	/* Transmit Frame Control         */
#define MREGS_TXFSTAT	0x03UL	/* Transmit Frame Status          */
#define MREGS_TXRCNT	0x04UL	/* Transmit Retry Count           */
#define MREGS_RXFCNTL	0x05UL	/* Receive Frame Control          */
#define MREGS_RXFSTAT	0x06UL	/* Receive Frame Status           */
#define MREGS_FFCNT	0x07UL	/* FIFO Frame Count               */
#define MREGS_IREG	0x08UL	/* Interrupt Register             */
#define MREGS_IMASK	0x09UL	/* Interrupt Mask                 */
#define MREGS_POLL	0x0aUL	/* POLL Register                  */
#define MREGS_BCONFIG	0x0bUL	/* BIU Config                     */
#define MREGS_FCONFIG	0x0cUL	/* FIFO Config                    */
#define MREGS_MCONFIG	0x0dUL	/* MAC Config                     */
#define MREGS_PLSCONFIG	0x0eUL	/* PLS Config                     */
#define MREGS_PHYCONFIG	0x0fUL	/* PHY Config                     */
#define MREGS_CHIPID1	0x10UL	/* Chip-ID, low bits              */
#define MREGS_CHIPID2	0x11UL	/* Chip-ID, high bits             */
#define MREGS_IACONFIG	0x12UL	/* Internal Address Config        */
	/* 0x13UL, reserved */
#define MREGS_FILTER	0x14UL	/* Logical Address Filter         */
#define MREGS_ETHADDR	0x15UL	/* Our Ethernet Address           */
	/* 0x16UL, reserved */
	/* 0x17UL, reserved */
#define MREGS_MPCNT	0x18UL	/* Missed Packet Count            */
	/* 0x19UL, reserved */
#define MREGS_RPCNT	0x1aUL	/* Runt Packet Count              */
#define MREGS_RCCNT	0x1bUL	/* RX Collision Count             */
	/* 0x1cUL, reserved */
#define MREGS_UTEST	0x1dUL	/* User Test                      */
#define MREGS_RTEST1	0x1eUL	/* Reserved Test 1                */
#define MREGS_RTEST2	0x1fUL	/* Reserved Test 2                */
#define MREGS_REG_SIZE	0x20UL

#define MREGS_TXFCNTL_DRETRY        0x80 /* Retry disable                  */
#define MREGS_TXFCNTL_DFCS          0x08 /* Disable TX FCS                 */
#define MREGS_TXFCNTL_AUTOPAD       0x01 /* TX auto pad                    */

#define MREGS_TXFSTAT_VALID         0x80 /* TX valid                       */
#define MREGS_TXFSTAT_UNDERFLOW     0x40 /* TX underflow                   */
#define MREGS_TXFSTAT_LCOLL         0x20 /* TX late collision              */
#define MREGS_TXFSTAT_MRETRY        0x10 /* TX > 1 retries                 */
#define MREGS_TXFSTAT_ORETRY        0x08 /* TX 1 retry                     */
#define MREGS_TXFSTAT_PDEFER        0x04 /* TX pkt deferred                */
#define MREGS_TXFSTAT_CLOSS         0x02 /* TX carrier lost                */
#define MREGS_TXFSTAT_RERROR        0x01 /* TX retry error                 */

#define MREGS_TXRCNT_EDEFER         0x80 /* TX Excess defers               */
#define MREGS_TXRCNT_CMASK          0x0f /* TX retry count                 */

#define MREGS_RXFCNTL_LOWLAT        0x08 /* RX low latency                 */
#define MREGS_RXFCNTL_AREJECT       0x04 /* RX addr match rej              */
#define MREGS_RXFCNTL_AUTOSTRIP     0x01 /* RX auto strip                  */

#define MREGS_RXFSTAT_OVERFLOW      0x80 /* RX overflow                    */
#define MREGS_RXFSTAT_LCOLL         0x40 /* RX late collision              */
#define MREGS_RXFSTAT_FERROR        0x20 /* RX framing error               */
#define MREGS_RXFSTAT_FCSERROR      0x10 /* RX FCS error                   */
#define MREGS_RXFSTAT_RBCNT         0x0f /* RX msg byte count              */

#define MREGS_FFCNT_RX              0xf0 /* RX FIFO frame cnt              */
#define MREGS_FFCNT_TX              0x0f /* TX FIFO frame cnt              */

#define MREGS_IREG_JABBER           0x80 /* IRQ Jabber error               */
#define MREGS_IREG_BABBLE           0x40 /* IRQ Babble error               */
#define MREGS_IREG_COLL             0x20 /* IRQ Collision error            */
#define MREGS_IREG_RCCO             0x10 /* IRQ Collision cnt overflow     */
#define MREGS_IREG_RPKTCO           0x08 /* IRQ Runt packet count overflow */
#define MREGS_IREG_MPKTCO           0x04 /* IRQ missed packet cnt overflow */
#define MREGS_IREG_RXIRQ            0x02 /* IRQ RX'd a packet              */
#define MREGS_IREG_TXIRQ            0x01 /* IRQ TX'd a packet              */

#define MREGS_IMASK_BABBLE          0x40 /* IMASK Babble errors            */
#define MREGS_IMASK_COLL            0x20 /* IMASK Collision errors         */
#define MREGS_IMASK_MPKTCO          0x04 /* IMASK Missed pkt cnt overflow  */
#define MREGS_IMASK_RXIRQ           0x02 /* IMASK RX interrupts            */
#define MREGS_IMASK_TXIRQ           0x01 /* IMASK TX interrupts            */

#define MREGS_POLL_TXVALID          0x80 /* TX is valid                    */
#define MREGS_POLL_TDTR             0x40 /* TX data transfer request       */
#define MREGS_POLL_RDTR             0x20 /* RX data transfer request       */

#define MREGS_BCONFIG_BSWAP         0x40 /* Byte Swap                      */
#define MREGS_BCONFIG_4TS           0x00 /* 4byte transmit start point     */
#define MREGS_BCONFIG_16TS          0x10 /* 16byte transmit start point    */
#define MREGS_BCONFIG_64TS          0x20 /* 64byte transmit start point    */
#define MREGS_BCONFIG_112TS         0x30 /* 112byte transmit start point   */
#define MREGS_BCONFIG_RESET         0x01 /* SW-Reset the MACE              */

#define MREGS_FCONFIG_TXF8          0x00 /* TX fifo 8 write cycles         */
#define MREGS_FCONFIG_TXF32         0x80 /* TX fifo 32 write cycles        */
#define MREGS_FCONFIG_TXF16         0x40 /* TX fifo 16 write cycles        */
#define MREGS_FCONFIG_RXF64         0x20 /* RX fifo 64 write cycles        */
#define MREGS_FCONFIG_RXF32         0x10 /* RX fifo 32 write cycles        */
#define MREGS_FCONFIG_RXF16         0x00 /* RX fifo 16 write cycles        */
#define MREGS_FCONFIG_TFWU          0x08 /* TX fifo watermark update       */
#define MREGS_FCONFIG_RFWU          0x04 /* RX fifo watermark update       */
#define MREGS_FCONFIG_TBENAB        0x02 /* TX burst enable                */
#define MREGS_FCONFIG_RBENAB        0x01 /* RX burst enable                */

#define MREGS_MCONFIG_PROMISC       0x80 /* Promiscuous mode enable        */
#define MREGS_MCONFIG_TPDDISAB      0x40 /* TX 2part deferral disable      */
#define MREGS_MCONFIG_MBAENAB       0x20 /* Modified backoff enable        */
#define MREGS_MCONFIG_RPADISAB      0x08 /* RX physical addr disable       */
#define MREGS_MCONFIG_RBDISAB       0x04 /* RX broadcast disable           */
#define MREGS_MCONFIG_TXENAB        0x02 /* Enable transmitter             */
#define MREGS_MCONFIG_RXENAB        0x01 /* Enable receiver                */

#define MREGS_PLSCONFIG_TXMS        0x08 /* TX mode select                 */
#define MREGS_PLSCONFIG_GPSI        0x06 /* Use GPSI connector             */
#define MREGS_PLSCONFIG_DAI         0x04 /* Use DAI connector              */
#define MREGS_PLSCONFIG_TP          0x02 /* Use TwistedPair connector      */
#define MREGS_PLSCONFIG_AUI         0x00 /* Use AUI connector              */
#define MREGS_PLSCONFIG_IOENAB      0x01 /* PLS I/O enable                 */

#define MREGS_PHYCONFIG_LSTAT       0x80 /* Link status                    */
#define MREGS_PHYCONFIG_LTESTDIS    0x40 /* Disable link test logic        */
#define MREGS_PHYCONFIG_RXPOLARITY  0x20 /* RX polarity                    */
#define MREGS_PHYCONFIG_APCDISAB    0x10 /* AutoPolarityCorrect disab      */
#define MREGS_PHYCONFIG_LTENAB      0x08 /* Select low threshold           */
#define MREGS_PHYCONFIG_AUTO        0x04 /* Connector port auto-sel        */
#define MREGS_PHYCONFIG_RWU         0x02 /* Remote WakeUp                  */
#define MREGS_PHYCONFIG_AW          0x01 /* Auto Wakeup                    */

#define MREGS_IACONFIG_ACHNGE       0x80 /* Do address change              */
#define MREGS_IACONFIG_PARESET      0x04 /* Physical address reset         */
#define MREGS_IACONFIG_LARESET      0x02 /* Logical address reset          */

#define MREGS_UTEST_RTRENAB         0x80 /* Enable resv test register      */
#define MREGS_UTEST_RTRDISAB        0x40 /* Disab resv test register       */
#define MREGS_UTEST_RPACCEPT        0x20 /* Accept runt packets            */
#define MREGS_UTEST_FCOLL           0x10 /* Force collision status         */
#define MREGS_UTEST_FCSENAB         0x08 /* Enable FCS on RX               */
#define MREGS_UTEST_INTLOOPM        0x06 /* Intern lpback w/MENDEC         */
#define MREGS_UTEST_INTLOOP         0x04 /* Intern lpback                  */
#define MREGS_UTEST_EXTLOOP         0x02 /* Extern lpback                  */
#define MREGS_UTEST_NOLOOP          0x00 /* No loopback                    */

struct qe_rxd {
	u32 rx_flags;
	u32 rx_addr;
};

#define RXD_OWN      0x80000000 /* Ownership.      */
#define RXD_UPDATE   0x10000000 /* Being Updated?  */
#define RXD_LENGTH   0x000007ff /* Packet Length.  */

struct qe_txd {
	u32 tx_flags;
	u32 tx_addr;
};

#define TXD_OWN      0x80000000 /* Ownership.      */
#define TXD_SOP      0x40000000 /* Start Of Packet */
#define TXD_EOP      0x20000000 /* End Of Packet   */
#define TXD_UPDATE   0x10000000 /* Being Updated?  */
#define TXD_LENGTH   0x000007ff /* Packet Length.  */

#define TX_RING_MAXSIZE   256
#define RX_RING_MAXSIZE   256

#define TX_RING_SIZE      16
#define RX_RING_SIZE      16

#define NEXT_RX(num)       (((num) + 1) & (RX_RING_MAXSIZE - 1))
#define NEXT_TX(num)       (((num) + 1) & (TX_RING_MAXSIZE - 1))
#define PREV_RX(num)       (((num) - 1) & (RX_RING_MAXSIZE - 1))
#define PREV_TX(num)       (((num) - 1) & (TX_RING_MAXSIZE - 1))

#define TX_BUFFS_AVAIL(qp)                                    \
        (((qp)->tx_old <= (qp)->tx_new) ?                     \
	  (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new :  \
			    (qp)->tx_old - (qp)->tx_new - 1)
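
/* The descriptor rings are power-of-two sized, so NEXT_*()/PREV_*()
 * wrap with a cheap AND mask instead of a modulo.  TX_BUFFS_AVAIL()
 * deliberately keeps one slot unused so that tx_old == tx_new always
 * means "empty" and never "full".  Worked examples (illustrative only):
 *
 *	NEXT_TX(255) == 0                           wraps around
 *	tx_old == 3, tx_new == 3  ->  15 free       (TX_RING_SIZE - 1)
 *	tx_old == 4, tx_new == 3  ->   0 free       (ring is full)
 */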

struct qe_init_block {
	struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
	struct qe_txd qe_txd[TX_RING_MAXSIZE];
};

#define qib_offset(mem, elem) \
((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))

struct sunqe;

struct sunqec {
	void __iomem		*gregs;		/* QEC Global Registers         */
	struct sunqe		*qes[4];	/* Each child MACE              */
	unsigned int            qec_bursts;	/* Support burst sizes          */
	struct sbus_dev		*qec_sdev;	/* QEC's SBUS device            */
	struct sunqec		*next_module;	/* List of all QECs in system   */
};

#define PKT_BUF_SZ	1664
#define RXD_PKT_SZ	1664

struct sunqe_buffers {
	u8	tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
	u8	__pad[2];
	u8	rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
};

#define qebuf_offset(mem, elem) \
((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
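
/* Both qib_offset() above and qebuf_offset() are the classic offsetof()
 * idiom: take the address of a member inside a struct "located" at
 * address zero, and the resulting pointer value is the member's byte
 * offset.  For example (illustrative only, from the layout above):
 *
 *	qebuf_offset(rx_buf, 0) == TX_RING_SIZE * PKT_BUF_SZ + 2 == 26626
 *
 * The driver adds such offsets to buffers_dvma/qblock_dvma to form the
 * DVMA addresses written into the descriptors' rx_addr/tx_addr fields.
 */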

struct sunqe {
	void __iomem			*qcregs;		/* QEC per-channel Registers   */
	void __iomem			*mregs;		/* Per-channel MACE Registers  */
	struct qe_init_block      	*qe_block;	/* RX and TX descriptors       */
	__u32                      	qblock_dvma;	/* RX and TX descriptors       */
	spinlock_t			lock;		/* Protects txfull state       */
	int                        	rx_new, rx_old;	/* RX ring extents	       */
	int			   	tx_new, tx_old;	/* TX ring extents	       */
	struct sunqe_buffers		*buffers;	/* CPU visible address.        */
	__u32				buffers_dvma;	/* DVMA visible address.       */
	struct sunqec			*parent;
	u8				mconfig;	/* Base MACE mconfig value     */
	struct sbus_dev			*qe_sdev;	/* QE's SBUS device struct     */
	struct net_device		*dev;		/* QE's netdevice struct       */
	int				channel;	/* Who am I?                   */
};

#endif /* !(_SUNQE_H) */
d='n1535' href='#n1535'>1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 
2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 * 		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate	|
 *	|  SIGINT            |	terminate	|
 *	|  SIGQUIT           |	coredump 	|
 *	|  SIGILL            |	coredump 	|
 *	|  SIGTRAP           |	coredump 	|
 *	|  SIGABRT/SIGIOT    |	coredump 	|
 *	|  SIGBUS            |	coredump 	|
 *	|  SIGFPE            |	coredump 	|
 *	|  SIGKILL           |	terminate(+)	|
 *	|  SIGUSR1           |	terminate	|
 *	|  SIGSEGV           |	coredump 	|
 *	|  SIGUSR2           |	terminate	|
 *	|  SIGPIPE           |	terminate	|
 *	|  SIGALRM           |	terminate	|
 *	|  SIGTERM           |	terminate	|
 *	|  SIGCHLD           |	ignore   	|
 *	|  SIGCONT           |	ignore(*)	|
 *	|  SIGSTOP           |	stop(*)(+)  	|
 *	|  SIGTSTP           |	stop(*)  	|
 *	|  SIGTTIN           |	stop(*)  	|
 *	|  SIGTTOU           |	stop(*)  	|
 *	|  SIGURG            |	ignore   	|
 *	|  SIGXCPU           |	coredump 	|
 *	|  SIGXFSZ           |	coredump 	|
 *	|  SIGVTALRM         |	terminate	|
 *	|  SIGPROF           |	terminate	|
 *	|  SIGPOLL/SIGIO     |	terminate	|
 *	|  SIGSYS/SIGUNUSED  |	coredump 	|
 *	|  SIGSTKFLT         |	terminate	|
 *	|  SIGWINCH          |	ignore   	|
 *	|  SIGPWR            |	terminate	|
 *	|  SIGRTMIN-SIGRTMAX |	terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump	|
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
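
/* A quick sanity check of the classification above (illustrative only;
 * each of these reduces to a compile-time constant):
 *
 *	sig_kernel_only(SIGKILL)	nonzero: can't be caught or ignored
 *	sig_kernel_stop(SIGTSTP)	nonzero: job-control stop
 *	sig_kernel_ignore(SIGCHLD)	nonzero: default action ignores
 *	sig_kernel_coredump(SIGSEGV)	nonzero: default action dumps core
 *
 * Any sig >= SIGRTMIN fails all four tests and takes the realtime
 * default, which is terminate.
 */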

#define sig_needs_tasklist(sig)	((sig) == SIGCONT)

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready !=	0;
}
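
/*
 * The switch above is a hand-unrolled loop: _NSIG_WORDS is a
 * compile-time constant (2 on most 32-bit ABIs, 1 on 64-bit, 4 where
 * _NSIG is 128), so the compiler keeps exactly one case and the common
 * configurations never execute the generic loop in "default:".
 */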

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;
	
	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}
	
	return sig;
}
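
/*
 * Note on the ffz(~x) trick above: ffz() finds the first zero bit, so
 * ffz(~x) is the first *set* bit of x, i.e. the lowest-numbered pending
 * signal that is not blocked.  Worked example (illustrative only): with
 * SIGINT (2) and SIGTERM (15) pending and nothing blocked, x has bits
 * 1 and 14 set, ffz(~x) returns 1, and next_signal() yields sig == 2.
 */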

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
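
/*
 * Sketch of the intended pairing (illustrative only): a driver that
 * must not have signals acted upon during a critical window installs a
 * notifier, does its work, then tears the notifier down again.  The
 * my_notifier()/my_state names below are hypothetical; historically the
 * DRM lock code was the main user of this interface.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_state *s = priv;
 *		return s->allow_signals;    (nonzero: act on the signal)
 *	}
 *
 *	block_all_signals(my_notifier, &state, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */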

static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is 
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	recalc_sigpending_tsk(tsk);
 	if (signr && unlikely(sig_kernel_stop(signr))) {
 		/*
 		 * Set a marker that we have dequeued a stop signal.  Our
 		 * caller might release the siglock and then the pending
 		 * stop signal it is about to process is no longer in the
 		 * pending bitmasks, but must still be cleared by a SIGCONT
 		 * (and overruled by a SIGKILL).  So those cases clear this
 		 * shared flag after we've set it.  Note that this flag may
 		 * remain set after the signal we return is ignored or
 		 * handled.  That doesn't matter because its only purpose
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
 		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
 			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;
	error = -EPERM;
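	/* The ^ tests below are cheap inequality checks: a ^ b is nonzero
	 * exactly when a != b.  EPERM results only when the sender's
	 * uid/euid matches neither the target's uid nor suid, CAP_KILL is
	 * absent, and the SIGCONT same-session exemption does not apply. */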
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal. 
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
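
/*
 * LEGACY_QUEUE() captures classic System V semantics for non-realtime
 * signals: at most one instance may be pending, so a duplicate is
 * silently coalesced rather than queued.  Illustrative consequence:
 * two kill(pid, SIGTERM) calls while the target blocks SIGTERM result
 * in a single delivery on unblock, whereas realtime signals sent with
 * sigqueue() are each queued (or fail with EAGAIN, never merged).
 */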


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
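
/*
 * The retry loop above exists because tsk->sighand can be changed (and
 * freed via RCU) by exec or exit while we spin on the lock; only after
 * taking siglock and re-checking that the pointer is unchanged do we
 * know we hold the lock the task is actually using.  A typical caller
 * is group_send_sig_info() just below.
 */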

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	if (pgrp <= 0)
		return -EINVAL;

	return __kill_pgrp_info(sig, info, find_pid(pgrp));
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;