 Makefile                              |  2 +-
 arch/x86/kernel/cpu/intel_cacheinfo.c |  1 +
 arch/x86/kernel/traps_32.c            |  7 +++----
 drivers/md/Kconfig                    |  2 +-
 drivers/md/dm-crypt.c                 | 31 +++++++++++++++----------------
 drivers/md/dm-ioctl.c                 | 12 +++++-------
 drivers/md/dm-table.c                 | 16 +++++++++++++++-
 drivers/md/dm.c                       | 31 ++++++++++++++++++++++-------
 drivers/md/dm.h                       |  7 +++++++
 fs/binfmt_aout.c                      |  1 -
 fs/xfs/linux-2.6/xfs_file.c           |  1 +
 fs/xfs/linux-2.6/xfs_iops.c           |  4 +---
 include/linux/device-mapper.h         |  1 +
 kernel/panic.c                        | 18 ++++++++++++++++++
 kernel/sched_rt.c                     |  2 ++
 mm/slub.c                             |  4 ++--
 16 files changed, 97 insertions(+), 43 deletions(-)
diff --git a/Makefile b/Makefile
index c1825aab77e8..fbb8dfc063d3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 24
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 606fe4d55a91..9f530ff43c21 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -49,6 +49,7 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
 	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
+	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
 	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index ef6010262597..c88bbffcaa03 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -373,14 +373,13 @@ void die(const char * str, struct pt_regs * regs, long err)
 
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
+		raw_local_irq_save(flags);
 		__raw_spin_lock(&die.lock);
-		raw_local_save_flags(flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);
-	}
-	else
-		raw_local_save_flags(flags);
+	} else
+		raw_local_irq_save(flags);
 
 	if (++die.lock_owner_depth < 3) {
 		unsigned long esp;
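
The reordering above fixes a lock-ordering hazard: die() previously took die.lock and only then sampled EFLAGS with raw_local_save_flags(), which records the flags without disabling interrupts, so an interrupt arriving while the lock was held could re-enter the oops path on the same CPU and deadlock. raw_local_irq_save() both records and disables, and doing it before acquiring the lock closes that window. A rough user-space analogue of the pattern, using signal masking to play the role of interrupt disabling (illustrative only; spinlock and IRQ semantics do not map exactly onto pthreads):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t die_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mask asynchronous interruption (signals here, IRQs in the kernel)
 * *before* taking the lock, so the holder cannot be re-entered while
 * it owns die_lock. */
static void enter_die(sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, saved);	/* raw_local_irq_save() */
	pthread_mutex_lock(&die_lock);			/* __raw_spin_lock() */
}

static void exit_die(const sigset_t *saved)
{
	pthread_mutex_unlock(&die_lock);
	pthread_sigmask(SIG_SETMASK, saved, NULL);	/* flags restore */
}

int main(void)
{
	sigset_t saved;

	enter_die(&saved);
	printf("in die(): lock held, interruption masked\n");
	exit_die(&saved);
	return 0;
}
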
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9b6fbf044fd8..3fa7c77d9bd9 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,7 +269,7 @@ config DM_MULTIPATH_RDAC
 
 config DM_MULTIPATH_HP
 	tristate "HP MSA multipath support (EXPERIMENTAL)"
-	depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+	depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
 	---help---
 	  Multipath support for HP MSA (Active/Passive) series hardware.
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 28c6ae095c56..6b66ee46b87d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -398,7 +398,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned int i;
+	unsigned i, len;
+	struct page *page;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
@@ -407,10 +408,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	clone_init(io, clone);
 
 	for (i = 0; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(clone, i);
-
-		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!bv->bv_page)
+		page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!page)
 			break;
 
 		/*
@@ -421,15 +420,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		bv->bv_offset = 0;
-		if (size > PAGE_SIZE)
-			bv->bv_len = PAGE_SIZE;
-		else
-			bv->bv_len = size;
+		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+		if (!bio_add_page(clone, page, len, 0)) {
+			mempool_free(page, cc->page_pool);
+			break;
+		}
 
-		clone->bi_size += bv->bv_len;
-		clone->bi_vcnt++;
-		size -= bv->bv_len;
+		size -= len;
 	}
 
 	if (!clone->bi_size) {
@@ -511,6 +509,9 @@ static void crypt_endio(struct bio *clone, int error)
 	struct crypt_config *cc = io->target->private;
 	unsigned read_io = bio_data_dir(clone) == READ;
 
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
+		error = -EIO;
+
 	/*
 	 * free the processed pages
 	 */
@@ -519,10 +520,8 @@ static void crypt_endio(struct bio *clone, int error)
 		goto out;
 	}
 
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
-		error = -EIO;
+	if (unlikely(error))
 		goto out;
-	}
 
 	bio_put(clone);
 	kcryptd_queue_crypt(io);
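
The crypt_alloc_buffer() rewrite above stops poking bio_vec fields directly and instead attaches each page with bio_add_page(), handing the page back to the mempool when the bio refuses it; the bi_size and bi_vcnt bookkeeping then stays inside the block layer. A minimal user-space sketch of that allocate/add/roll-back loop, with a fixed-capacity container standing in for the bio and malloc() for the mempool (all names below are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define MAX_PAGES 4u

struct buffer {
	void *pages[MAX_PAGES];
	unsigned int nr;
	unsigned int size;
};

/* Stand-in for bio_add_page(): returns bytes added, or 0 on refusal. */
static unsigned int buffer_add_page(struct buffer *b, void *page,
				    unsigned int len)
{
	if (b->nr >= MAX_PAGES)
		return 0;
	b->pages[b->nr++] = page;
	b->size += len;
	return len;
}

int main(void)
{
	struct buffer buf = { { 0 }, 0, 0 };
	unsigned int size = 6 * PAGE_SIZE;	/* request exceeds capacity */

	while (size) {
		void *page = malloc(PAGE_SIZE);	/* mempool_alloc() stand-in */
		unsigned int len = size > PAGE_SIZE ? PAGE_SIZE : size;

		if (!page)
			break;
		if (!buffer_add_page(&buf, page, len)) {
			free(page);	/* give back the page nobody took */
			break;
		}
		size -= len;
	}

	printf("buffered %u bytes in %u pages\n", buf.size, buf.nr);
	while (buf.nr)
		free(buf.pages[--buf.nr]);
	return 0;
}

The partial result is still usable, exactly as in the driver: the caller checks how much was buffered rather than demanding all-or-nothing.
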
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 138200bf5e0b..9627fa0f9470 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -332,6 +332,8 @@ static int dm_hash_rename(const char *old, const char *new)
 		dm_table_put(table);
 	}
 
+	dm_kobject_uevent(hc->md);
+
 	dm_put(hc->md);
 	up_write(&_hash_lock);
 	kfree(old_name);
@@ -1250,21 +1252,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (!table)
 		goto out_argv;
 
-	if (tmsg->sector >= dm_table_get_size(table)) {
+	ti = dm_table_find_target(table, tmsg->sector);
+	if (!dm_target_is_valid(ti)) {
 		DMWARN("Target message sector outside device.");
 		r = -EINVAL;
-		goto out_table;
-	}
-
-	ti = dm_table_find_target(table, tmsg->sector);
-	if (ti->type->message)
+	} else if (ti->type->message)
 		r = ti->type->message(ti, argc, argv);
 	else {
 		DMWARN("Target type does not support messages");
 		r = -EINVAL;
 	}
 
-out_table:
 	dm_table_put(table);
 out_argv:
 	kfree(argv);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e298d8d11f24..47818d8249cb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -99,6 +99,9 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
+	lhs->max_hw_sectors =
+		min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
+
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
@@ -189,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
 	/*
 	 * Allocate both the target array and offset array at once.
+	 * Append an empty entry to catch sectors beyond the end of
+	 * the device.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
 					  sizeof(sector_t));
 	if (!n_highs)
 		return -ENOMEM;
@@ -564,6 +569,9 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
+	rs->max_hw_sectors =
+		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
@@ -701,6 +709,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
 		rs->max_sectors = SAFE_MAX_SECTORS;
+	if (!rs->max_hw_sectors)
+		rs->max_hw_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
@@ -867,6 +877,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
@@ -896,6 +909,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->max_hw_segments = t->limits.max_hw_segments;
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
+	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
 	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
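
All of the max_hw_sectors plumbing above relies on min_not_zero(), where zero means "no limit recorded yet" and therefore must lose to any real limit. A simplified sketch of the idiom (the kernel's macro is typeof-based and evaluates each argument once; this plain version double-evaluates and is for illustration only):

#include <stdio.h>

/* Zero is "unlimited", so it must not win the comparison. */
#define min_not_zero(a, b) \
	((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
	unsigned int table_limit = 0;		/* dm table: nothing set yet */
	unsigned int queue_limit = 1024;	/* underlying queue's limit */

	table_limit = min_not_zero(table_limit, queue_limit);
	printf("combined: %u\n", table_limit);	/* 1024: inherits the limit */

	table_limit = min_not_zero(table_limit, 512u);
	printf("combined: %u\n", table_limit);	/* 512: strictest device wins */
	return 0;
}

Stacking one more device into the table can only tighten the combined limit, never loosen it, which is exactly what a request queue needs.
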
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 07cbbb8eb3e0..88c0fd657825 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
 	struct bio *clone, *bio = ci->bio;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+	struct dm_target *ti;
+	sector_t len = 0, max;
 	struct dm_target_io *tio;
 
+	ti = dm_table_find_target(ci->map, ci->sector);
+	if (!dm_target_is_valid(ti))
+		return -EIO;
+
+	max = max_io_len(ci->md, ci->sector, ti);
+
 	/*
 	 * Allocate a target io object.
 	 */
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci)
 		do {
 			if (offset) {
 				ti = dm_table_find_target(ci->map, ci->sector);
+				if (!dm_target_is_valid(ti))
+					return -EIO;
+
 				max = max_io_len(ci->md, ci->sector, ti);
 
 				tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci)
 
 		ci->idx++;
 	}
+
+	return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
 	struct clone_info ci;
+	int error = 0;
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
 	ci.idx = bio->bi_idx;
 
 	start_io_acct(ci.io);
-	while (ci.sector_count)
-		__clone_and_map(&ci);
+	while (ci.sector_count && !error)
+		error = __clone_and_map(&ci);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
+	dec_pending(ci.io, error);
 	dm_table_put(ci.map);
 
 	return 0;
@@ -1502,7 +1514,7 @@ int dm_resume(struct mapped_device *md)
 
 	dm_table_unplug_all(map);
 
-	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+	dm_kobject_uevent(md);
 
 	r = 0;
 
@@ -1516,6 +1528,11 @@ out:
 /*-----------------------------------------------------------------
  * Event notification.
  *---------------------------------------------------------------*/
+void dm_kobject_uevent(struct mapped_device *md)
+{
+	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);
+}
+
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
 {
 	return atomic_add_return(1, &md->uevent_seq);
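
__clone_and_map() changes from void to int above so that a target lookup past the end of the device surfaces as -EIO instead of dereferencing an invalid target, and __split_bio() now stops splitting on the first failure and completes the pending I/O with that error. A compact sketch of the loop's error-propagation shape, with invented chunk sizes and a fake failure point:

#include <stdio.h>

#define EIO 5

/* Maps one chunk; returns 0 or a negative errno, as __clone_and_map()
 * now does. Failing past sector 200 fakes a lookup beyond the device. */
static int clone_and_map(unsigned long *sector, unsigned long *remaining)
{
	unsigned long chunk = *remaining < 100 ? *remaining : 100;

	if (*sector >= 200)
		return -EIO;
	*sector += chunk;
	*remaining -= chunk;
	return 0;
}

int main(void)
{
	unsigned long sector = 0, remaining = 500;
	int error = 0;

	/* Same shape as the new __split_bio() loop: stop on the first
	 * error and complete the whole I/O with it. */
	while (remaining && !error)
		error = clone_and_map(&sector, &remaining);

	printf("stopped at sector %lu, error=%d\n", sector, error);
	return 0;
}
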
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4b3faa45277e..b4584a39383b 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -112,6 +112,11 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
 
+/*
+ * To check the return value from dm_table_find_target().
+ */
+#define dm_target_is_valid(t) ((t)->table)
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
@@ -182,4 +187,6 @@ union map_info *dm_get_mapinfo(struct bio *bio);
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
 
+void dm_kobject_uevent(struct mapped_device *md);
+
 #endif
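
dm_target_is_valid() works together with the alloc_targets() change in dm-table.c: the target array is now sized num + 1, leaving one zeroed sentinel entry past the last real target, so a search that runs off the device returns the sentinel, whose table back-pointer is NULL. A self-contained sketch of the sentinel technique (types and names simplified, not the dm originals):

#include <stdio.h>
#include <stdlib.h>

struct dm_table_stub;			/* opaque owner, like struct dm_table */

struct target {
	struct dm_table_stub *table;	/* NULL only in the sentinel */
	unsigned long begin, len;
};

#define target_is_valid(t) ((t)->table)

int main(void)
{
	struct dm_table_stub *owner = malloc(1);	/* any non-NULL owner */
	unsigned int num = 2;
	/* num real entries plus one zeroed sentinel, as in alloc_targets() */
	struct target *targets = calloc(num + 1, sizeof(*targets));

	targets[0] = (struct target){ owner, 0, 100 };
	targets[1] = (struct target){ owner, 100, 100 };

	/* A lookup that runs past the device lands on the sentinel. */
	printf("in range: %d\n", target_is_valid(&targets[1]) ? 1 : 0);
	printf("past end: %d\n", target_is_valid(&targets[num]) ? 1 : 0);

	free(targets);
	free(owner);
	return 0;
}

The sentinel means callers need no separate bounds check before using the result; a single validity test covers both "found" and "beyond end of device".
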
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index e176d195e7e5..7596e1e94cde 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -319,7 +319,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->free_area_cache = current->mm->mmap_base;
 	current->mm->cached_hole_size = 0;
 
-	current->mm->mmap = NULL;
 	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 #ifdef __sparc__
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index e1fcef2eb928..4847eb83fc18 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -347,6 +347,7 @@ xfs_file_readdir(
 
 		size = buf.used;
 		de = (struct hack_dirent *)buf.dirent;
+		curr_offset = de->offset /* & 0x7fffffff */;
 		while (size > 0) {
 			if (filldir(dirent, de->name, de->namlen,
 				    curr_offset & 0x7fffffff,
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 37e116779eb1..5e8bb7f71b5a 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -332,9 +332,7 @@ xfs_vn_mknod(
 	ASSERT(vp);
 	ip = vn_to_inode(vp);
 
-	if (S_ISCHR(mode) || S_ISBLK(mode))
-		ip->i_rdev = rdev;
-	else if (S_ISDIR(mode))
+	if (S_ISDIR(mode))
 		xfs_validate_fields(ip);
 	d_instantiate(dentry, ip);
 	xfs_validate_fields(dir);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index b8b7c51389fe..e765e191663d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -115,6 +115,7 @@ struct io_restrictions {
 	unsigned short max_hw_segments;
 	unsigned short hardsect_size;
 	unsigned int max_segment_size;
+	unsigned int max_hw_sectors;
 	unsigned long seg_boundary_mask;
 	unsigned long bounce_pfn;
 	unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/kernel/panic.c b/kernel/panic.c
index 6f6e03e91595..da4d6bac270e 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -19,6 +19,7 @@
 #include <linux/nmi.h>
 #include <linux/kexec.h>
 #include <linux/debug_locks.h>
+#include <linux/random.h>
 
 int panic_on_oops;
 int tainted;
@@ -266,12 +267,29 @@ void oops_enter(void)
 }
 
 /*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+	if (!oops_id)
+		get_random_bytes(&oops_id, sizeof(oops_id));
+
+	return 0;
+}
+late_initcall(init_oops_id);
+
+/*
  * Called when the architecture exits its oops handler, after printing
  * everything.
  */
 void oops_exit(void)
 {
 	do_oops_enter_exit();
+	init_oops_id();
+	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+		(unsigned long long)oops_id);
 }
 
 #ifdef CONFIG_CC_STACKPROTECTOR
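
The panic.c hunk stamps every oops report with one random 64-bit ID per boot, so multiple traces in a log can be tied to the same boot; the ID is normally generated at late_initcall time, and the extra call from oops_exit() covers a crash that happens before initcalls finish. A user-space analogue, with getrandom(2) standing in for get_random_bytes() (names otherwise mirror the patch):

#include <stdio.h>
#include <stdint.h>
#include <sys/random.h>

static uint64_t oops_id;

/* In the kernel this runs from late_initcall(); calling it again on
 * first use covers crashes that happen before initcalls complete. */
static void init_oops_id(void)
{
	if (!oops_id)
		getrandom(&oops_id, sizeof(oops_id), 0);
}

static void oops_exit(void)
{
	init_oops_id();
	printf("---[ end trace %016llx ]---\n",
	       (unsigned long long)oops_id);
}

int main(void)
{
	oops_exit();
	oops_exit();	/* same id: all oopses in one boot share the tag */
	return 0;
}
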
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ee9c8b6529e9..9ba3daa03475 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -208,6 +208,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
+	update_curr_rt(rq);
+
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.
diff --git a/mm/slub.c b/mm/slub.c
index b9f37cb0f2e6..3655ad359f03 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
 
 /*
  * Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ checks_ok:
 	 * then add it.
 	 */
 	if (unlikely(!prior))
-		add_partial(get_node(s, page_to_nid(page)), page);
+		add_partial_tail(get_node(s, page_to_nid(page)), page);
 
 out_unlock:
 	slab_unlock(page);