-rw-r--r--  drivers/net/8139cp.c           |  2
-rw-r--r--  drivers/net/8139too.c          |  2
-rw-r--r--  drivers/net/gianfar.c          |  3
-rw-r--r--  drivers/net/phy/lxt.c          | 51
-rw-r--r--  drivers/net/r8169.c            | 12
-rw-r--r--  drivers/net/wimax/i2400m/fw.c  |  2
-rw-r--r--  include/linux/skbuff.h         |  5
-rw-r--r--  net/8021q/vlan_core.c          |  4
-rw-r--r--  net/caif/cfrfml.c              |  2
-rw-r--r--  net/caif/cfveil.c              |  2
-rw-r--r--  net/core/dev.c                 | 25
-rw-r--r--  net/core/gen_estimator.c       | 15
-rw-r--r--  net/core/pktgen.c              |  2
-rw-r--r--  net/ipv6/icmp.c                |  4
14 files changed, 103 insertions, 28 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 9c149750e2bf..284a5f4a63ac 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -598,8 +598,8 @@ rx_next:
 			goto rx_status_loop;
 
 		spin_lock_irqsave(&cp->lock, flags);
-		cpw16_f(IntrMask, cp_intr_mask);
 		__napi_complete(napi);
+		cpw16_f(IntrMask, cp_intr_mask);
 		spin_unlock_irqrestore(&cp->lock, flags);
 	}
 
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 80cd074d3817..97d8068b372b 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2089,8 +2089,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 		 * again when we think we are done.
 		 */
 		spin_lock_irqsave(&tp->lock, flags);
-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		__napi_complete(napi);
+		RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 	spin_unlock(&tp->rx_lock);
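
Reviewer note: both Realtek hunks above make the same ordering change: __napi_complete() now runs before the interrupt mask is restored. This looks like the usual NAPI completion race: with the old order, an interrupt taken after the unmask but before __napi_complete() still sees NAPI_STATE_SCHED set, so it cannot reschedule polling and pending RX work can sit unserviced. A minimal, illustrative poll tail showing the safe ordering is sketched below; example_priv, example_rx_work() and example_unmask_irqs() are hypothetical stand-ins, not functions from either driver.

/*
 * Illustrative sketch only, not part of the patch.  A minimal NAPI poll
 * tail enforcing the ordering used by the two hunks above.
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	struct napi_struct napi;
	spinlock_t lock;
};

static int example_rx_work(struct example_priv *priv, int budget)
{
	return 0;	/* placeholder: a real driver drains its RX ring here */
}

static void example_unmask_irqs(struct example_priv *priv)
{
	/* placeholder: a real driver writes its interrupt-mask register here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = example_rx_work(priv, budget);

	if (work < budget) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		/* Clear NAPI_STATE_SCHED first ... */
		__napi_complete(napi);
		/* ... then unmask, so an IRQ arriving right after this point
		 * can successfully napi_schedule() us again. */
		example_unmask_irqs(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return work;
}
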
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1830f3199cb5..46c69cd06553 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -747,8 +747,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 			FSL_GIANFAR_DEV_HAS_CSUM |
 			FSL_GIANFAR_DEV_HAS_VLAN |
 			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-			FSL_GIANFAR_DEV_HAS_TIMER;
+			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
 
 	ctype = of_get_property(np, "phy-connection-type", NULL);
 
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 8ee929b796d8..dbd003453737 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -53,6 +53,9 @@
 
 #define MII_LXT971_ISR		19  /* Interrupt Status Register */
 
+/* register definitions for the 973 */
+#define MII_LXT973_PCR		16 /* Port Configuration Register */
+#define PCR_FIBER_SELECT 1
 
 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");
@@ -119,6 +122,33 @@ static int lxt971_config_intr(struct phy_device *phydev)
 	return err;
 }
 
+static int lxt973_probe(struct phy_device *phydev)
+{
+	int val = phy_read(phydev, MII_LXT973_PCR);
+
+	if (val & PCR_FIBER_SELECT) {
+		/*
+		 * If fiber is selected, then the only correct setting
+		 * is 100Mbps, full duplex, and auto negotiation off.
+		 */
+		val = phy_read(phydev, MII_BMCR);
+		val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+		val &= ~BMCR_ANENABLE;
+		phy_write(phydev, MII_BMCR, val);
+		/* Remember that the port is in fiber mode. */
+		phydev->priv = lxt973_probe;
+	} else {
+		phydev->priv = NULL;
+	}
+	return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+	/* Do nothing if port is in fiber mode. */
+	return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
 	.phy_id		= 0x78100000,
 	.name		= "LXT970",
@@ -146,6 +176,18 @@ static struct phy_driver lxt971_driver = {
 	.driver		= { .owner = THIS_MODULE,},
 };
 
+static struct phy_driver lxt973_driver = {
+	.phy_id		= 0x00137a10,
+	.name		= "LXT973",
+	.phy_id_mask	= 0xfffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= 0,
+	.probe		= lxt973_probe,
+	.config_aneg	= lxt973_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
 	int ret;
@@ -157,9 +199,15 @@ static int __init lxt_init(void)
 	ret = phy_driver_register(&lxt971_driver);
 	if (ret)
 		goto err2;
+
+	ret = phy_driver_register(&lxt973_driver);
+	if (ret)
+		goto err3;
 	return 0;
 
- err2:
+ err3:
+	phy_driver_unregister(&lxt971_driver);
+ err2:
 	phy_driver_unregister(&lxt970_driver);
  err1:
 	return ret;
@@ -169,6 +217,7 @@ static void __exit lxt_exit(void)
 {
 	phy_driver_unregister(&lxt970_driver);
 	phy_driver_unregister(&lxt971_driver);
+	phy_driver_unregister(&lxt973_driver);
 }
 
 module_init(lxt_init);
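
Reviewer note: the fiber-select bit that lxt973_probe() tests is bit 0 of PHY register 16. For anyone wanting to inspect it without instrumenting the driver, the standard MII ioctls can read the register from userspace, assuming the NIC driver implements SIOCGMIIPHY/SIOCGMIIREG, the system's linux/mii.h exposes struct mii_ioctl_data, and the process runs as root; "eth0" below is an assumed interface name. Illustrative sketch only, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* mii-tool idiom: the MII data lives inline in the ifreq union. */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {	/* fills in mii->phy_id */
		perror("SIOCGMIIPHY");
		return 1;
	}
	mii->reg_num = 16;			/* MII_LXT973_PCR */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		return 1;
	}
	printf("PCR = 0x%04x, fiber select = %u\n",
	       mii->val_out, mii->val_out & 1);
	close(fd);
	return 0;
}
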
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 03a8318d90a2..96b6cfbf0a3a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -560,10 +560,10 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
 		udelay(25);
 	}
 	/*
-	 * Some configurations require a small delay even after the write
-	 * completed indication or the next write might fail.
+	 * According to hardware specs a 20us delay is required after write
+	 * complete indication, but before sending next command.
 	 */
-	udelay(25);
+	udelay(20);
 }
 
 static int mdio_read(void __iomem *ioaddr, int reg_addr)
@@ -583,6 +583,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
 		}
 		udelay(25);
 	}
+	/*
+	 * According to hardware specs a 20us delay is required after read
+	 * complete indication, but before sending next command.
+	 */
+	udelay(20);
+
 	return value;
 }
 
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 3f283bff0ff7..11491354e5b5 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1192,7 +1192,7 @@ int i2400m_fw_hdr_check(struct i2400m *i2400m,
 	unsigned module_type, header_len, major_version, minor_version,
 		module_id, module_vendor, date, size;
 
-	module_type = bcf_hdr->module_type;
+	module_type = le32_to_cpu(bcf_hdr->module_type);
 	header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
 	major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
 		>> 16;
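
Reviewer note: the fw.c change adds the le32_to_cpu() that the surrounding fields already get, so the header check also behaves correctly on big-endian hosts; the BCF header fields are little-endian in the firmware image. The standalone snippet below (userspace C, invented value) illustrates why the byte-order conversion matters; le32_decode() is a hypothetical helper mirroring what le32_to_cpu() achieves.

/* Illustrative only.  Decoding a little-endian 32-bit field explicitly works
 * on any host; reading it through a host-order integer only happens to work
 * on little-endian machines.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t le32_decode(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t raw[4] = { 0x06, 0x00, 0x00, 0x00 };	/* module_type = 6, LE */
	uint32_t host_read;

	memcpy(&host_read, raw, sizeof(host_read));

	printf("portable decode: %u\n", le32_decode(raw));
	printf("host-order read: %u (would print 100663296 on big-endian)\n",
	       host_read);
	return 0;
}
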
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf243fc54959..f89e7fd59a4c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -380,7 +380,10 @@ struct sk_buff {
 	kmemcheck_bitfield_begin(flags2);
 	__u16			queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
-	__u8			ndisc_nodetype:2;
+	__u8			ndisc_nodetype:2,
+				deliver_no_wcard:1;
+#else
+	__u8			deliver_no_wcard:1;
 #endif
 	kmemcheck_bitfield_end(flags2);
 
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		return NET_RX_DROP;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 	struct sk_buff *p;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-		goto drop;
+		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_err("CAIF: %s():Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
 		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
 			__func__, cfpkt_getlen(pkt));
 		return -EOVERFLOW;
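
Reviewer note: both CAIF hunks fix the same operator-precedence bug. `!` binds tighter than `>`, so the old condition was parsed as `(!cfpkt_getlen(pkt)) > CAIF_MAX_PAYLOAD_SIZE`, i.e. 0 or 1 compared against the maximum, which never fires for a non-empty packet. The standalone program below demonstrates the difference; MAX_PAYLOAD and the packet lengths are made-up values.

/* Demonstrates the precedence bug fixed in cfrfml.c/cfveil.c above. */
#include <stdio.h>

#define MAX_PAYLOAD 4096

int main(void)
{
	unsigned int lengths[] = { 0, 100, 4096, 5000, 65535 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int len = lengths[i];
		/* Original condition: parsed as (!len) > MAX_PAYLOAD,
		 * i.e. 0 or 1 compared with 4096 -- always false. */
		int buggy = !len > MAX_PAYLOAD;
		/* Fixed condition. */
		int fixed = len > MAX_PAYLOAD;

		printf("len=%5u  buggy=%d  fixed=%d\n", len, buggy, fixed);
	}
	return 0;
}
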
diff --git a/net/core/dev.c b/net/core/dev.c
index d03470f5260a..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2253,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb)) {
 		u16 index = skb_get_rx_queue(skb);
 		if (unlikely(index >= dev->num_rx_queues)) {
-			if (net_ratelimit()) {
-				pr_warning("%s received packet on queue "
-					"%u, but number of RX queues is %u\n",
-					dev->name, index, dev->num_rx_queues);
-			}
+			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+				"on queue %u, but number of RX queues is %u\n",
+				dev->name, index, dev->num_rx_queues);
 			goto done;
 		}
 		rxqueue = dev->_rx + index;
@@ -2812,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	if (!skb->skb_iif)
 		skb->skb_iif = skb->dev->ifindex;
 
+	/*
+	 * bonding note: skbs received on inactive slaves should only
+	 * be delivered to pkt handlers that are exact matches. Also
+	 * the deliver_no_wcard flag will be set. If packet handlers
+	 * are sensitive to duplicate packets these skbs will need to
+	 * be dropped at the handler. The vlan accel path may have
+	 * already set the deliver_no_wcard flag.
+	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
 	master = ACCESS_ONCE(orig_dev->master);
-	if (master) {
-		if (skb_bond_should_drop(skb, master))
+	if (skb->deliver_no_wcard)
+		null_or_orig = orig_dev;
+	else if (master) {
+		if (skb_bond_should_drop(skb, master)) {
+			skb->deliver_no_wcard = 1;
 			null_or_orig = orig_dev; /* deliver only exact match */
-		else
+		} else
 			skb->dev = master;
 	}
 
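
Reviewer note: the skbuff.h, vlan_core.c and dev.c hunks form one change: frames arriving on an inactive bonding slave (or flagged by the VLAN acceleration path) are now marked with deliver_no_wcard rather than dropped, and __netif_receive_skb() then delivers them only to packet handlers bound to that exact device, so wildcard handlers (packet_type.dev == NULL) are skipped for those frames. For context, a handler opts into exact-match delivery by setting packet_type.dev before dev_add_pack(); a hedged sketch follows, with example_rcv, example_bind and the EtherType value being hypothetical, not anything from this patch.

/*
 * Illustrative only: a protocol handler bound to one device.  Because
 * pt.dev is set, __netif_receive_skb() still delivers skbs whose
 * deliver_no_wcard flag is set (null_or_orig == orig_dev); handlers with
 * pt.dev == NULL do not see those skbs.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

#define EXAMPLE_ETH_P 0x88b5	/* an IEEE 802 local experimental EtherType */

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Handle the frame, then free it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype = {
	.type = cpu_to_be16(EXAMPLE_ETH_P),
	.func = example_rcv,
	/* .dev is filled in below: exact-match delivery to that device only. */
};

static void example_bind(struct net_device *dev)
{
	example_ptype.dev = dev;
	dev_add_pack(&example_ptype);
}

static void example_unbind(void)
{
	dev_remove_pack(&example_ptype);
}
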
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
 
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	end_time = ktime_now();
 
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
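
Reviewer note: the pktgen one-liner bases next_tx on the time the thread asked to wake up (spin_until) rather than the time spin() actually returned, so per-iteration wake-up latency no longer accumulates into the inter-packet gap. The toy userspace model below shows the drift; the delay, overhead and packet count are invented numbers.

/* Illustrative only.  Models advancing next_tx from the actual wake-up time
 * (old code) versus from the scheduled time (new code).
 */
#include <stdio.h>
#include <stdint.h>

#define DELAY_NS	1000000ULL	/* requested 1 ms between packets */
#define OVERHEAD_NS	5000ULL		/* pretend each wake-up is 5 us late */
#define PACKETS		1000

int main(void)
{
	uint64_t next_old = 0, next_new = 0;
	int i;

	for (i = 0; i < PACKETS; i++) {
		/* The thread wakes up a little after the deadline. */
		uint64_t wake_old = next_old + OVERHEAD_NS;

		next_old = wake_old + DELAY_NS;	/* old: end_time + delay  */
		next_new = next_new + DELAY_NS;	/* new: spin_until + delay */
	}

	printf("ideal schedule after %d packets: %llu ns\n", PACKETS,
	       (unsigned long long)(PACKETS * DELAY_NS));
	printf("old scheme next_tx: %llu ns (overhead accumulated)\n",
	       (unsigned long long)next_old);
	printf("new scheme next_tx: %llu ns (stays on schedule)\n",
	       (unsigned long long)next_new);
	return 0;
}
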
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ce7992982557..03e62f94ff8e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,7 +483,7 @@ route_done:
 			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 			      np->dontfrag);
 
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 		goto out_put;
 	}