Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c                                                   17
-rw-r--r--  kernel/irq/manage.c                                               3
-rw-r--r--  kernel/kexec.c                                                    7
-rw-r--r--  kernel/power/Kconfig                                              9
-rw-r--r--  kernel/power/Makefile                                             2
-rw-r--r--  kernel/power/nvs.c (renamed from kernel/power/hibernate_nvs.c)   24
-rw-r--r--  kernel/power/suspend.c                                            6
-rw-r--r--  kernel/sched.c                                                  133
-rw-r--r--  kernel/sched_fair.c                                               2
-rw-r--r--  kernel/time/tick-sched.c                                          5
-rw-r--r--  kernel/trace/trace_event_perf.c                                   4
11 files changed, 115 insertions, 97 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index e7a35f1039e..6a3a5fa1526 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -429,20 +429,11 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
 	struct task_struct *p;
-	const struct cred *cred = current_cred(), *pcred;
 
 	rcu_read_lock();
 	p = find_task_by_vpid(pid);
-	if (!p) {
-		p = ERR_PTR(-ESRCH);
-	} else {
-		pcred = __task_cred(p);
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid)
-			p = ERR_PTR(-ESRCH);
-		else
-			get_task_struct(p);
-	}
+	if (p)
+		get_task_struct(p);
 
 	rcu_read_unlock();
 
@@ -564,8 +555,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!pid)
 		return -ESRCH;
 	p = futex_find_get_task(pid);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	if (!p)
+		return -ESRCH;
 
 	/*
 	 * We need to look at the task state flags to figure out,
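The two futex.c hunks above go together: once futex_find_get_task() stops encoding -ESRCH into the returned pointer, its caller in lookup_pi_state() has to test for NULL instead of IS_ERR()/PTR_ERR(). As a rough, self-contained userspace sketch of the two return conventions (not kernel code; the ERR_PTR/IS_ERR/PTR_ERR macros below are simplified stand-ins for what include/linux/err.h provides, and find_old()/find_new() are made up for illustration):

#include <stdio.h>
#include <stddef.h>

#define ESRCH		3
#define MAX_ERRNO	4095

/* Simplified stand-ins for the kernel's pointer-error helpers. */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Old convention: encode the errno in the pointer itself. */
static void *find_old(int exists)
{
	return exists ? (void *)"task" : ERR_PTR(-ESRCH);
}

/* New convention: NULL on failure; the caller supplies -ESRCH itself. */
static void *find_new(int exists)
{
	return exists ? (void *)"task" : NULL;
}

int main(void)
{
	void *p = find_old(0);

	if (IS_ERR(p))
		printf("old style: error %ld\n", PTR_ERR(p));
	if (!find_new(0))
		printf("new style: error %d\n", -ESRCH);
	return 0;
}

With only one possible failure mode left, returning NULL spares the caller from decoding an error out of the pointer, which is what the caller-side hunk above reflects.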
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3164ba7ce15..e1497481fe8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -456,6 +456,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
+
+		if (chip != desc->chip)
+			irq_chip_set_defaults(desc->chip);
 	}
 
 	return ret;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 474a84715ea..131b1703936 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1089,9 +1089,10 @@ void crash_kexec(struct pt_regs *regs)
 
 size_t crash_get_memory_size(void)
 {
-	size_t size;
+	size_t size = 0;
 	mutex_lock(&kexec_mutex);
-	size = crashk_res.end - crashk_res.start + 1;
+	if (crashk_res.end != crashk_res.start)
+		size = crashk_res.end - crashk_res.start + 1;
 	mutex_unlock(&kexec_mutex);
 	return size;
 }
@@ -1134,7 +1135,7 @@ int crash_shrink_memory(unsigned long new_size)
 
 	free_reserved_phys_range(end, crashk_res.end);
 
-	if (start == end)
+	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
 	crashk_res.end = end - 1;
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5c36ea9d55d..ca6066a6952 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -99,9 +99,13 @@ config PM_SLEEP_ADVANCED_DEBUG
 	depends on PM_ADVANCED_DEBUG
 	default n
 
+config SUSPEND_NVS
+	bool
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
+	select SUSPEND_NVS if HAS_IOMEM
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -130,13 +134,10 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
-config HIBERNATION_NVS
-	bool
-
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
-	select HIBERNATION_NVS if HAS_IOMEM
+	select SUSPEND_NVS if HAS_IOMEM
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 524e058dcf0..f9063c6b185 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -10,6 +10,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_HIBERNATION_NVS)	+= hibernate_nvs.o
+obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/nvs.c
index fdcad9ed5a7..1836db60bbb 100644
--- a/kernel/power/hibernate_nvs.c
+++ b/kernel/power/nvs.c
@@ -15,7 +15,7 @@
 
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
- * hibernation and to restore the contents of this memory during the subsequent
+ * suspend and to restore the contents of this memory during the subsequent
  * resume. The code below implements a mechanism allowing us to do that.
  */
 
@@ -30,7 +30,7 @@ struct nvs_page {
 static LIST_HEAD(nvs_list);
 
 /**
- *	hibernate_nvs_register - register platform NVS memory region to save
+ *	suspend_nvs_register - register platform NVS memory region to save
  *	@start - physical address of the region
  *	@size - size of the region
  *
@@ -38,7 +38,7 @@ static LIST_HEAD(nvs_list);
  *	things so that the data from page-aligned addresses in this region will
  *	be copied into separate RAM pages.
  */
-int hibernate_nvs_register(unsigned long start, unsigned long size)
+int suspend_nvs_register(unsigned long start, unsigned long size)
 {
 	struct nvs_page *entry, *next;
 
@@ -68,9 +68,9 @@ int hibernate_nvs_register(unsigned long start, unsigned long size)
 }
 
 /**
- *	hibernate_nvs_free - free data pages allocated for saving NVS regions
+ *	suspend_nvs_free - free data pages allocated for saving NVS regions
  */
-void hibernate_nvs_free(void)
+void suspend_nvs_free(void)
 {
 	struct nvs_page *entry;
 
@@ -86,16 +86,16 @@ void hibernate_nvs_free(void)
 }
 
 /**
- *	hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ *	suspend_nvs_alloc - allocate memory necessary for saving NVS regions
  */
-int hibernate_nvs_alloc(void)
+int suspend_nvs_alloc(void)
 {
 	struct nvs_page *entry;
 
 	list_for_each_entry(entry, &nvs_list, node) {
 		entry->data = (void *)__get_free_page(GFP_KERNEL);
 		if (!entry->data) {
-			hibernate_nvs_free();
+			suspend_nvs_free();
 			return -ENOMEM;
 		}
 	}
@@ -103,9 +103,9 @@ int hibernate_nvs_alloc(void)
 }
 
 /**
- *	hibernate_nvs_save - save NVS memory regions
+ *	suspend_nvs_save - save NVS memory regions
  */
-void hibernate_nvs_save(void)
+void suspend_nvs_save(void)
 {
 	struct nvs_page *entry;
 
@@ -119,12 +119,12 @@ void hibernate_nvs_save(void)
 }
 
 /**
- *	hibernate_nvs_restore - restore NVS memory regions
+ *	suspend_nvs_restore - restore NVS memory regions
  *
  *	This function is going to be called with interrupts disabled, so it
  *	cannot iounmap the virtual addresses used to access the NVS region.
  */
-void hibernate_nvs_restore(void)
+void suspend_nvs_restore(void)
 {
 	struct nvs_page *entry;
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 56e7dbb8b99..f37cb7dd440 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -16,6 +16,12 @@
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
 
 #include "power.h"
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228d..cb816e36cc8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -306,52 +306,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
  */
 struct task_group init_task_group;
 
-/* return group to which a task belongs */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	struct task_group *tg;
-
-#ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
-			  struct task_group, css);
-#else
-	tg = &init_task_group;
-#endif
-	return tg;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-	/*
-	 * Strictly speaking this rcu_read_lock() is not needed since the
-	 * task_group is tied to the cgroup, which in turn can never go away
-	 * as long as there are tasks attached to it.
-	 *
-	 * However since task_group() uses task_subsys_state() which is an
-	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
-	 */
-	rcu_read_lock();
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-	p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
-	p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-	rcu_read_unlock();
-}
-
-#else
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return NULL;
-}
-
 #endif /* CONFIG_CGROUP_SCHED */
 
 /* CFS-related fields in a runqueue */
@@ -644,6 +598,49 @@ static inline int cpu_of(struct rq *rq)
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this tasks belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification
+ * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * holds that lock for each task it moves into the cgroup. Therefore
+ * by holding that lock, we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css;
+
+	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+			lockdep_is_held(&task_rq(p)->lock));
+	return container_of(css, struct task_group, css);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+	p->rt.parent = task_group(p)->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
 inline void update_rq_clock(struct rq *rq)
 {
 	if (!rq->skip_clock_update)
@@ -1257,6 +1254,12 @@ static void sched_avg_update(struct rq *rq)
 	s64 period = sched_avg_period();
 
 	while ((s64)(rq->clock - rq->age_stamp) > period) {
+		/*
+		 * Inline assembly required to prevent the compiler
+		 * optimising this loop into a divmod call.
+		 * See __iter_div_u64_rem() for another example of this.
+		 */
+		asm("" : "+rm" (rq->age_stamp));
 		rq->age_stamp += period;
 		rq->rt_avg /= 2;
 	}
@@ -1660,9 +1663,6 @@ static void update_shares(struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
-	if (root_task_group_empty())
-		return;
-
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
@@ -2494,7 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
+	/*
+	 * The child is not yet in the pid-hash so no cgroup attach races,
+	 * and the cgroup is pinned to this child due to cgroup_fork()
+	 * is ran before sched_fork().
+	 *
+	 * Silence PROVE_RCU.
+	 */
+	rcu_read_lock();
 	set_task_cpu(p, cpu);
+	rcu_read_unlock();
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -4465,16 +4474,6 @@ recheck:
 	}
 
 	if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
-		/*
-		 * Do not allow realtime tasks into groups that have no runtime
-		 * assigned.
-		 */
-		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0)
-			return -EPERM;
-#endif
-
 		retval = security_task_setscheduler(p, policy, param);
 		if (retval)
 			return retval;
@@ -4490,6 +4489,22 @@ recheck:
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (user) {
+		/*
+		 * Do not allow realtime tasks into groups that have no runtime
+		 * assigned.
+		 */
+		if (rt_bandwidth_enabled() && rt_policy(policy) &&
+				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			__task_rq_unlock(rq);
+			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+			return -EPERM;
+		}
+	}
+#endif
+
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
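A note on the sched_avg_update() hunk above: the new comment explains that the empty asm statement keeps the compiler from collapsing the catch-up loop into a divmod call. Here is a minimal standalone sketch of the same trick (plain userspace C using GCC/Clang extended asm, not kernel code; catch_up() and the sample numbers are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Advance 'stamp' by whole periods until it is within one period of 'now',
 * mirroring the rq->age_stamp loop in sched_avg_update() above. */
static uint64_t catch_up(uint64_t stamp, uint64_t now, uint64_t period)
{
	while ((int64_t)(now - stamp) > (int64_t)period) {
		/* The empty asm makes 'stamp' the input/output of an opaque
		 * statement, so the optimizer cannot fold the whole loop
		 * into a single division/modulo. */
		asm("" : "+rm" (stamp));
		stamp += period;
	}
	return stamp;
}

int main(void)
{
	/* Stops at 960, since 1000 - 960 = 40 is within one 64-unit period. */
	printf("%llu\n", (unsigned long long)catch_up(0, 1000, 64));
	return 0;
}

Without such a barrier, a 64-bit divide in this pattern can be emitted as a libgcc helper call on 32-bit targets, which is the situation the kernel comment alludes to via __iter_div_u64_rem().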
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eed35eded60..a878b5332da 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c03..783fbadf220 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -315,9 +315,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 	}
 
-	if (nohz_ratelimit(cpu))
-		goto end;
-
 	ts->idle_calls++;
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
@@ -328,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	} while (read_seqretry(&xtime_lock, seq));
 
 	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	    arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index e6f65887842..8a2b73f7c06 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -96,7 +96,9 @@ int perf_trace_init(struct perf_event *p_event)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tp_event, &ftrace_events, list) {
 		if (tp_event->event.type == event_id &&
-		    tp_event->class && tp_event->class->perf_probe &&
+		    tp_event->class &&
+		    (tp_event->class->perf_probe ||
+		     tp_event->class->reg) &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			break;