author     Sjur Braendeland <sjur.brandeland@stericsson.com>   2010-06-17 02:55:39 -0400
committer  David S. Miller <davem@davemloft.net>                2010-06-20 22:46:05 -0400
commit     a7da1f55a826c621251874e7684c234972fc3216 (patch)
tree       e5c66620b9cfe00ed62233665002a0acf5fc1004 /net
parent     b1c74247b9e29ae3bfdf133862328c309bc9cf14 (diff)
caif: Bugfix - RFM must support segmentation.
The CAIF Remote File Manager may send or receive more than 4050 bytes.
Because of this, the CAIF RFM service has to support segmentation.
Signed-off-by: Sjur Braendeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
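
To make the framing concrete: each oversized PDU is cut into fragments whose size is
derived from the link MTU and rounded down to a multiple of 16; every non-final fragment
carries a one-byte header with the low bit (RFM_SEGMENTATION_BIT) set to mean "more
segments follow", and the final fragment clears that bit. The following is a minimal
user-space sketch of that slicing, not the kernel code itself: it assumes a plain byte
count in place of cfpkt buffers, omits the 6-byte segment head that the real code carries
on each fragment, and the helper name rfm_fragment_size() is only for illustration; the
constants are taken from the diff below.

/*
 * Minimal user-space sketch of the RFM slicing introduced by this patch.
 * Assumptions: payload handled as a byte count rather than cfpkt buffers,
 * and the 6-byte segment head is omitted. Constants mirror the diff.
 */
#include <stdio.h>

#define RFM_HEAD_SIZE        7     /* as defined in cfrfml.c below */
#define RFM_SEGMENTATION_BIT 0x01
#define RFM_FRAGMENT_SIZE    4030  /* mtu_size passed in from cfcnfg.c */

static int rfm_fragment_size(int mtu_size)
{
	/* Round down to the closest multiple of 16, as cfrfml_create() does. */
	return ((mtu_size - RFM_HEAD_SIZE - 6) / 16) * 16;
}

int main(void)
{
	int frag = rfm_fragment_size(RFM_FRAGMENT_SIZE);  /* 4016 bytes */
	int pdu_len = 10000;   /* example payload larger than one fragment */
	int offset = 0, seg_no = 0;

	while (pdu_len - offset > frag) {
		/* Non-final segment: segmentation bit set, 'frag' payload bytes. */
		printf("segment %d: flags=0x%02x payload=%d\n",
		       seg_no++, (unsigned)RFM_SEGMENTATION_BIT, frag);
		offset += frag;
	}
	/* Final segment: segmentation bit cleared, remaining bytes. */
	printf("segment %d: flags=0x%02x payload=%d\n",
	       seg_no, 0u, pdu_len - offset);
	return 0;
}

With the RFM_FRAGMENT_SIZE of 4030 that cfcnfg.c now passes to cfrfml_create(), the rounded
fragment size works out to 4016 bytes, and the CAIF_MAX_PAYLOAD_SIZE check that previously
rejected large writes is removed from caif_seqpkt_sendmsg().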
Diffstat (limited to 'net')
-rw-r--r--  net/caif/caif_socket.c  |   4
-rw-r--r--  net/caif/cfcnfg.c       |   4
-rw-r--r--  net/caif/cfpkt_skbuff.c |   1
-rw-r--r--  net/caif/cfrfml.c       | 314
-rw-r--r--  net/caif/cfsrvl.c       |  13
5 files changed, 271 insertions, 65 deletions
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 791249316ef3..848ae755cdd6 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -596,10 +596,6 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 	buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
 
-	ret = -EMSGSIZE;
-	if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
-		goto err;
-
 	timeo = sock_sndtimeo(sk, noblock);
 	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
 				1, timeo, &ret);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 7c81974a45c4..cff2dcb9efe4 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -22,6 +22,7 @@
 #define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
@@ -328,7 +329,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
 		break;
 	case CFCTRL_SRV_RFM:
-		servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
+						RFM_FRAGMENT_SIZE);
 		break;
 	case CFCTRL_SRV_UTIL:
 		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index a6fdf899741a..318b0f4b416e 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -338,7 +338,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
 	u16 dstlen;
 	u16 createlen;
 	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
-		cfpkt_destroy(addpkt);
 		return dstpkt;
 	}
 	if (expectlen > addlen)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 689cbfd0e43d..4b04d25b6a3f 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,98 +7,304 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/unaligned/le_byteshift.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
-#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
-
+#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
 #define RFM_SEGMENTATION_BIT 0x01
-#define RFM_PAYLOAD 0x00
-#define RFM_CMD_BIT 0x80
-#define RFM_FLOW_OFF 0x81
-#define RFM_FLOW_ON 0x80
-#define RFM_SET_PIN 0x82
-#define RFM_CTRL_PKT_SIZE 1
+#define RFM_HEAD_SIZE 7
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
-struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+struct cfrfml {
+	struct cfsrvl serv;
+	struct cfpkt *incomplete_frm;
+	int fragment_size;
+	u8 seghead[6];
+	u16 pdu_size;
+	/* Protects serialized processing of packets */
+	spinlock_t sync;
+};
+
+static void cfrfml_release(struct kref *kref)
+{
+	struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
+	struct cfrfml *rfml = container_obj(&srvl->layer);
+
+	if (rfml->incomplete_frm)
+		cfpkt_destroy(rfml->incomplete_frm);
+
+	kfree(srvl);
+}
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
+				int mtu_size)
 {
-	struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+	int tmp;
+	struct cfrfml *this =
+		kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
 
-	if (!rfm) {
+	if (!this) {
 		pr_warning("CAIF: %s(): Out of memory\n", __func__);
 		return NULL;
 	}
 
-	caif_assert(offsetof(struct cfsrvl, layer) == 0);
+	cfsrvl_init(&this->serv, channel_id, dev_info, false);
+	this->serv.release = cfrfml_release;
+	this->serv.layer.receive = cfrfml_receive;
+	this->serv.layer.transmit = cfrfml_transmit;
+
+	/* Round down to closest multiple of 16 */
+	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
+	tmp *= 16;
+
+	this->fragment_size = tmp;
+	spin_lock_init(&this->sync);
+	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
+		"rfm%d", channel_id);
+
+	return &this->serv.layer;
+}
+
+static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
+			struct cfpkt *pkt, int *err)
+{
+	struct cfpkt *tmppkt;
+	*err = -EPROTO;
+	/* n-th but not last segment */
+
+	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
+		return NULL;
+
+	/* Verify correct header */
+	if (memcmp(seghead, rfml->seghead, 6) != 0)
+		return NULL;
+
+	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
+			rfml->pdu_size + RFM_HEAD_SIZE);
+
+	/* If cfpkt_append failes input pkts are not freed */
+	*err = -ENOMEM;
+	if (tmppkt == NULL)
+		return NULL;
 
-	memset(rfm, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(rfm, channel_id, dev_info, false);
-	rfm->layer.receive = cfrfml_receive;
-	rfm->layer.transmit = cfrfml_transmit;
-	snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
-	return &rfm->layer;
+	*err = 0;
+	return tmppkt;
 }
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 {
 	u8 tmp;
 	bool segmented;
-	int ret;
+	int err;
+	u8 seghead[6];
+	struct cfrfml *rfml;
+	struct cfpkt *tmppkt = NULL;
+
 	caif_assert(layr->up != NULL);
 	caif_assert(layr->receive != NULL);
+	rfml = container_obj(layr);
+	spin_lock(&rfml->sync);
+
+	err = -EPROTO;
+	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
+		goto out;
+	segmented = tmp & RFM_SEGMENTATION_BIT;
+
+	if (segmented) {
+		if (rfml->incomplete_frm == NULL) {
+			/* Initial Segment */
+			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+				goto out;
+
+			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
+
+			if (cfpkt_erroneous(pkt))
+				goto out;
+			rfml->incomplete_frm = pkt;
+			pkt = NULL;
+		} else {
+
+			tmppkt = rfm_append(rfml, seghead, pkt, &err);
+			if (tmppkt == NULL)
+				goto out;
+
+			if (cfpkt_erroneous(tmppkt))
+				goto out;
+
+			rfml->incomplete_frm = tmppkt;
+
+
+			if (cfpkt_erroneous(tmppkt))
+				goto out;
+		}
+		err = 0;
+		goto out;
+	}
+
+	if (rfml->incomplete_frm) {
+
+		/* Last Segment */
+		tmppkt = rfm_append(rfml, seghead, pkt, &err);
+		if (tmppkt == NULL)
+			goto out;
+
+		if (cfpkt_erroneous(tmppkt))
+			goto out;
+
+		rfml->incomplete_frm = NULL;
+		pkt = tmppkt;
+		tmppkt = NULL;
+
+		/* Verify that length is correct */
+		err = EPROTO;
+		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
+			goto out;
+	}
+
+	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
+
+out:
+
+	if (err != 0) {
+		if (tmppkt)
+			cfpkt_destroy(tmppkt);
+		if (pkt)
+			cfpkt_destroy(pkt);
+		if (rfml->incomplete_frm)
+			cfpkt_destroy(rfml->incomplete_frm);
+		rfml->incomplete_frm = NULL;
+
+		pr_info("CAIF: %s(): "
+			"Connection error %d triggered on RFM link\n",
+			__func__, err);
+
+		/* Trigger connection error upon failure.*/
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+	}
+	spin_unlock(&rfml->sync);
+	return err;
+}
+
+
+static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
+{
+	caif_assert(!cfpkt_getlen(pkt) < rfml->fragment_size);
+
+	/* Add info for MUX-layer to route the packet out. */
+	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
 
 	/*
-	 * RFM is taking care of segmentation and stripping of
-	 * segmentation bit.
+	 * To optimize alignment, we add up the size of CAIF header before
+	 * payload.
 	 */
-	if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		cfpkt_destroy(pkt);
-		return -EPROTO;
-	}
-	segmented = tmp & RFM_SEGMENTATION_BIT;
-	caif_assert(!segmented);
+	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
+	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
 
-	ret = layr->up->receive(layr->up, pkt);
-	return ret;
+	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
 }
 
 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-	u8 tmp = 0;
-	int ret;
-	struct cfsrvl *service = container_obj(layr);
+	int err;
+	u8 seg;
+	u8 head[6];
+	struct cfpkt *rearpkt = NULL;
+	struct cfpkt *frontpkt = pkt;
+	struct cfrfml *rfml = container_obj(layr);
 
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
 
-	if (!cfsrvl_ready(service, &ret))
-		return ret;
+	if (!cfsrvl_ready(&rfml->serv, &err))
+		return err;
+
+	err = -EPROTO;
+	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
+		goto out;
+
+	err = 0;
+	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
+		err = cfpkt_peek_head(pkt, head, 6);
+
+	if (err < 0)
+		goto out;
+
+	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
+
+		seg = 1;
+		err = -EPROTO;
+
+		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+			goto out;
+		/*
+		 * On OOM error cfpkt_split returns NULL.
+		 *
+		 * NOTE: Segmented pdu is not correctly aligned.
+		 * This has negative performance impact.
+		 */
+
+		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
+		if (rearpkt == NULL)
+			goto out;
+
+		err = cfrfml_transmit_segment(rfml, frontpkt);
+
+		if (err != 0)
+			goto out;
+		frontpkt = rearpkt;
+		rearpkt = NULL;
+
+		err = -ENOMEM;
+		if (frontpkt == NULL)
+			goto out;
+		err = -EPROTO;
+		if (cfpkt_add_head(frontpkt, head, 6) < 0)
+			goto out;
 
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_err("CAIF: %s():Packet too large - size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
 	}
-	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		return -EPROTO;
+
+	seg = 0;
+	err = -EPROTO;
+
+	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+		goto out;
+
+	err = cfrfml_transmit_segment(rfml, frontpkt);
+
+	frontpkt = NULL;
+out:
+
+	if (err != 0) {
+		pr_info("CAIF: %s(): "
+			"Connection error %d triggered on RFM link\n",
+			__func__, err);
+		/* Trigger connection error upon failure.*/
+
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+
+		if (rearpkt)
+			cfpkt_destroy(rearpkt);
+
+		if (frontpkt && frontpkt != pkt) {
+
+			cfpkt_destroy(frontpkt);
+			/*
+			 * Socket layer will free the original packet,
+			 * but this packet may already be sent and
+			 * freed. So we have to return 0 in this case
+			 * to avoid socket layer to re-free this packet.
+			 * The return of shutdown indication will
+			 * cause connection to be invalidated anyhow.
+			 */
+			err = 0;
+		}
 	}
 
-	/* Add info for MUX-layer to route the packet out. */
-	cfpkt_info(pkt)->channel_id = service->layer.id;
-	/*
-	 * To optimize alignment, we add up the size of CAIF header before
-	 * payload.
-	 */
-	cfpkt_info(pkt)->hdr_len = 1;
-	cfpkt_info(pkt)->dev_info = &service->dev_info;
-	ret = layr->dn->transmit(layr->dn, pkt);
-	if (ret < 0)
-		cfpkt_extr_head(pkt, &tmp, 1);
-	return ret;
+	return err;
 }
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 7aa1f03a0151..4c9f147c38af 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -159,6 +159,13 @@ void cfservl_destroy(struct cflayer *layer)
 	kfree(layer);
 }
 
+void cfsrvl_release(struct kref *kref)
+{
+	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+	pr_info("CAIF: %s(): enter\n", __func__);
+	kfree(service);
+}
+
 void cfsrvl_init(struct cfsrvl *service,
 		u8 channel_id,
 		struct dev_info *dev_info,
@@ -174,14 +181,10 @@ void cfsrvl_init(struct cfsrvl *service,
 	service->layer.modemcmd = cfservl_modemcmd;
 	service->dev_info = *dev_info;
 	service->supports_flowctrl = supports_flowctrl;
+	service->release = cfsrvl_release;
 	kref_init(&service->ref);
 }
 
-void cfsrvl_release(struct kref *kref)
-{
-	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
-	kfree(service);
-}
 
 bool cfsrvl_ready(struct cfsrvl *service, int *err)
 {