aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-05-09 05:32:36 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-09 15:30:44 -0400
commitbe7b3fbcef34452127bed93632b8e788f685d70e (patch)
treea2d1e80103982fd606390d4bb15131d1dd544b45
parent65c02d4cfbbd10188ded3d6577922ab034d943ba (diff)
SLUB: after object padding only needed for Redzoning
If no redzoning is selected then we do not need padding before the next object. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slub.c2
1 files changed, 1 insertions, 1 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 40e92d8d4bc6..beac34a5e4fd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1661,7 +1661,7 @@ static int calculate_sizes(struct kmem_cache *s)
1661 */ 1661 */
1662 size += 2 * sizeof(struct track); 1662 size += 2 * sizeof(struct track);
1663 1663
1664 if (flags & DEBUG_DEFAULT_FLAGS) 1664 if (flags & SLAB_RED_ZONE)
1665 /* 1665 /*
1666 * Add some empty padding so that we can catch 1666 * Add some empty padding so that we can catch
1667 * overwrites from earlier objects rather than let 1667 * overwrites from earlier objects rather than let
m> 2008-10-15 15:50:22 -0400 committer Alexey Dobriyan <adobriyan@gmail.com> 2008-10-23 05:52:40 -0400 proc: switch /proc/meminfo to seq_file' href='/cgit/cgit.cgi/litmus-rt.git/commit/include/linux/hugetlb.h?h=wip-splitting-jerickso&id=e1759c215bee5abbcb6cb066590ab20905154ed5'>e1759c215bee
1da177e4c3f4
1da177e4c3f4
ac9b9c667c2e

a1e78772d72b

a43a8c39bbb4
1da177e4c3f4
396faf0303d2
1da177e4c3f4

53ba51d21d6e
1da177e4c3f4
63551ae0feaa

a5516438959d

63551ae0feaa
39dde65c9940
63551ae0feaa



ceb868796181

63551ae0feaa
ceb868796181
8f860591ffb2

63551ae0feaa
1da177e4c3f4





a1e78772d72b




1da177e4c3f4




5b23dbe8173c
1da177e4c3f4


04f2cbe35699
e1759c215bee


1da177e4c3f4

ceb868796181
a5516438959d
1da177e4c3f4
ceb868796181
1da177e4c3f4
9da61aef0fd5
ac9b9c667c2e
1da177e4c3f4
8f860591ffb2

1da177e4c3f4
51c6f666fceb

1da177e4c3f4










a137e1cc6d6e
1da177e4c3f4







a137e1cc6d6e
1da177e4c3f4

















4b6f5d20b04d
1da177e4c3f4
9d66586f7723
9a119c056dc2

1da177e4c3f4


516dffdcd882





1da177e4c3f4









9d66586f7723
1da177e4c3f4


d2ba27e8007b





a5516438959d

a3437870160c
a5516438959d














a3437870160c
a5516438959d

53ba51d21d6e







e5ff215941d5










a5516438959d
a137e1cc6d6e
a5516438959d
a137e1cc6d6e


a5516438959d



a137e1cc6d6e
a5516438959d

a137e1cc6d6e
a5516438959d
a137e1cc6d6e
a5516438959d






08fba69986e2

3340289ddf29

a5516438959d


























e5ff215941d5




a5516438959d

53ba51d21d6e
a5516438959d




08fba69986e2
3340289ddf29
a5516438959d

510a35d4a478



a5516438959d

1da177e4c3f4
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290


                        

                     


                            
                      
                         







                                                                
                                                           
                                                                                                      
                                                                                                          
                                                                                                             
                                                                                             
                                                                                                                                                 



                                                                     
                                                                      
                                               
                                             
                                        

                                                                   

                                                                            
                                                                           
 
                                                

                                                          
                                        
 

                    

                                                              
                                                                 
                                                                             



                                                                          

                                                                         
                        
                        

                                                                            
 





                                                                




                                                                        




                                                     
                                                               


                                                                
                                                             


                                                             

                                                    
                                                    
                                                         
                         
                         
                                                 
                                                                             
                                                               
 

                                                             
                  

                                                                             










                                 
                              







                                                  
                              

















                                                                            
                                                              
                                                  
                                                          

                                                                  


                                                      





                                                     









                                                        
                                                        


                              





                                                                               

                          
                          














                                                           
                                   

  







                                                     










                                                    
 
                                                          
 


                                      



                                                        
                                                  

 
                                                                   
 
                                         






                                                            

                                                                     

                                                                  


























                                                                 




                                                                 

                 
                                       




                                   
                                        
                                     

                                     



                                                                

      
                             
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/fs.h>

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct ctl_table;

/*
 * Nonzero iff @vma is a hugetlb mapping, i.e. VM_HUGETLB is set in its
 * vm_flags.  Callers treat the result as a boolean.
 */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;

	return flags & VM_HUGETLB;
}

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
void unmap_hugepage_range(struct vm_area_struct *,
			unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range(struct vm_area_struct *,
			unsigned long, unsigned long, struct page *);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

/* !CONFIG_HUGETLB_PAGE stub: no VMA can ever be a hugetlb mapping. */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}

/* !CONFIG_HUGETLB_PAGE stub: no hugetlb reservations exist, nothing to reset. */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

/* !CONFIG_HUGETLB_PAGE stub: no huge pages are ever allocated. */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end, page)	BUG()
/* !CONFIG_HUGETLB_PAGE stub: emit no hugetlb lines into /proc/meminfo output. */
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })

#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK	PAGE_MASK		/* Keep the compiler happy */
#define HPAGE_SIZE	PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLBFS
/*
 * Per-mount configuration for hugetlbfs — presumably filled from mount
 * options and consumed when the superblock is set up (fill_super lives
 * outside this header; confirm against fs/hugetlbfs/inode.c).
 */
struct hugetlbfs_config {
	uid_t   uid;		/* owner uid of the root inode */
	gid_t   gid;		/* owner gid of the root inode */
	umode_t mode;		/* mode of the root inode */
	long	nr_blocks;	/* block (huge page) limit for the mount */
	long	nr_inodes;	/* inode limit for the mount */
	struct hstate *hstate;	/* huge page size this mount serves */
};

/* Per-superblock accounting for a hugetlbfs mount; see HUGETLBFS_SB(). */
struct hugetlbfs_sb_info {
	long	max_blocks;   /* blocks allowed */
	long	free_blocks;  /* blocks free */
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;	/* protects the counters above */
	struct hstate *hstate;		/* huge page size of this mount */
};


/*
 * hugetlbfs inode payload: NUMA shared policy plus the embedded VFS inode.
 * Recovered from a struct inode via HUGETLBFS_I() below.
 */
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

/* Map an embedded vfs_inode back to its enclosing hugetlbfs_inode_info. */
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

/* Fetch the hugetlbfs-private info stashed in the superblock's s_fs_info. */
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);

/*
 * Returns 1 when @file is backed by huge pages: either it is a plain
 * hugetlbfs file, or a SysV shm segment created with SHM_HUGETLB.
 */
static inline int is_file_hugepages(struct file *file)
{
	return file->f_op == &hugetlbfs_file_operations ||
	       is_file_shm_hugepages(file);
}

/* Retarget @file's operations at hugetlbfs, making it a hugepage file. */
static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		0
#define set_file_hugepages(file)	BUG()
#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int hugetlb_next_nid;	/* next node to allocate from — NOTE(review):
				 * round-robin allocator state, confirm in mm/hugetlb.c */
	unsigned int order;	/* page order: size = PAGE_SIZE << order */
	unsigned long mask;	/* address mask for this page size (~(size-1)) */
	unsigned long max_huge_pages;		/* configured pool target */
	unsigned long nr_huge_pages;		/* pages currently in the pool */
	unsigned long free_huge_pages;		/* pages free in the pool */
	unsigned long resv_huge_pages;		/* pages reserved but not yet faulted */
	unsigned long surplus_huge_pages;	/* pages above max_huge_pages */
	unsigned long nr_overcommit_huge_pages;	/* allowed surplus ceiling */
	struct list_head hugepage_freelists[MAX_NUMNODES];	/* per-node free lists */
	unsigned int nr_huge_pages_node[MAX_NUMNODES];		/* per-node totals */
	unsigned int free_huge_pages_node[MAX_NUMNODES];	/* per-node free counts */
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];	/* per-node surplus */
	char name[HSTATE_NAME_LEN];	/* e.g. sysfs/display name for this size */
};

/*
 * A huge page carved out of bootmem before the buddy allocator is up;
 * queued on huge_boot_pages (see extern above) with the hstate it belongs to.
 */
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/* Huge page size of the hugetlbfs mount that @i lives on. */
static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

/* Huge page size of the hugetlbfs file @f, via its backing inode. */
static inline struct hstate *hstate_file(struct file *f)
{
	struct inode *inode = f->f_dentry->d_inode;

	return hstate_inode(inode);
}

/* Huge page size of the mapping @vma belongs to, via its backing file. */
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	return hstate_file(file);
}

/* Size in bytes of one huge page of this hstate: PAGE_SIZE << order. */
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

/* Address mask for this huge page size (the HPAGE_MASK analogue). */
static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

/* Allocation order of one huge page (number of base pages = 1 << order). */
static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

/* log2 of the huge page size in bytes (the HPAGE_SHIFT analogue). */
static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

/* Number of base pages making up one huge page of this hstate. */
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

/* Huge page size expressed in 512-byte blocks (for i_blocks accounting). */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

/*
 * Find the hstate matching @page by its compound size.  NOTE(review):
 * only meaningful for the head page of a huge page — confirm callers.
 */
static inline struct hstate *page_hstate(struct page *page)
{
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

#else
struct hstate {};
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#endif

#endif /* _LINUX_HUGETLB_H */