author		Fabian Frederick <fabf@skynet.be>	2014-06-06 17:36:25 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-06 19:08:09 -0400
commit		e46707d1533182d15a84026ba91ed6c9a2662162 (patch)
tree		aecf61defe0d222e7f5fe1f9a4ebf4ccd93c691c /fs/hfsplus
parent		97a62eaefdc196969e9d3857f65fc8a90ee1fe3c (diff)
fs/hfsplus/bnode.c: replace min/casting by min_t
Also fixes some pr_ formats.

Signed-off-by: Fabian Frederick <fabf@skynet.be>
Cc: Vyacheslav Dubeyko <slava@dubeyko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/hfsplus')
-rw-r--r--	fs/hfsplus/bnode.c	32
1 file changed, 15 insertions(+), 17 deletions(-)
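
For context, the point of min_t() over min() plus a cast: the kernel's min() warns at compile time when its two operands have different types, so comparing the int "len" against the unsigned long PAGE_CACHE_SIZE previously needed an ad-hoc (int) cast on one side. min_t(type, x, y) instead casts both operands to the named type before comparing. A minimal userspace sketch of the idea (builds with gcc/clang); the min_t body here is a simplified stand-in for the kernel's definition in include/linux/kernel.h, not a verbatim copy:

/*
 * Simplified sketch, not kernel code.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL	/* unsigned long, as in the kernel */

/* Kernel-style min_t: cast BOTH operands to one explicit type. */
#define min_t(type, x, y) ({		\
	type __x = (type)(x);		\
	type __y = (type)(y);		\
	__x < __y ? __x : __y; })

int main(void)
{
	int len = 100;
	int off = 10;

	/*
	 * The old code silenced min()'s type check with a cast:
	 *     l = min(len, (int)PAGE_CACHE_SIZE - off);
	 * min_t() names the comparison type once instead:
	 */
	int l = min_t(int, len, PAGE_CACHE_SIZE - off);

	printf("l = %d\n", l);	/* prints "l = 100" */
	return 0;
}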
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 11c860204520..285502af8df1 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -27,13 +27,13 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
 	off &= ~PAGE_CACHE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_CACHE_SIZE - off);
 	memcpy(buf, kmap(*pagep) + off, l);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min(len, (int)PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_CACHE_SIZE);
 		memcpy(buf, kmap(*++pagep), l);
 		kunmap(*pagep);
 	}
@@ -80,14 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
 	off &= ~PAGE_CACHE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_CACHE_SIZE - off);
 	memcpy(kmap(*pagep) + off, buf, l);
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
 		buf += l;
-		l = min(len, (int)PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_CACHE_SIZE);
 		memcpy(kmap(*++pagep), buf, l);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
@@ -110,13 +110,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
 	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
 	off &= ~PAGE_CACHE_MASK;
 
-	l = min(len, (int)PAGE_CACHE_SIZE - off);
+	l = min_t(int, len, PAGE_CACHE_SIZE - off);
 	memset(kmap(*pagep) + off, 0, l);
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 
 	while ((len -= l) != 0) {
-		l = min(len, (int)PAGE_CACHE_SIZE);
+		l = min_t(int, len, PAGE_CACHE_SIZE);
 		memset(kmap(*++pagep), 0, l);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
@@ -142,14 +142,14 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 	dst &= ~PAGE_CACHE_MASK;
 
 	if (src == dst) {
-		l = min(len, (int)PAGE_CACHE_SIZE - src);
+		l = min_t(int, len, PAGE_CACHE_SIZE - src);
 		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
 		kunmap(*src_page);
 		set_page_dirty(*dst_page);
 		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
-			l = min(len, (int)PAGE_CACHE_SIZE);
+			l = min_t(int, len, PAGE_CACHE_SIZE);
 			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
@@ -251,7 +251,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 	dst &= ~PAGE_CACHE_MASK;
 
 	if (src == dst) {
-		l = min(len, (int)PAGE_CACHE_SIZE - src);
+		l = min_t(int, len, PAGE_CACHE_SIZE - src);
 		memmove(kmap(*dst_page) + src,
 			kmap(*src_page) + src, l);
 		kunmap(*src_page);
@@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 		kunmap(*dst_page);
 
 		while ((len -= l) != 0) {
-			l = min(len, (int)PAGE_CACHE_SIZE);
+			l = min_t(int, len, PAGE_CACHE_SIZE);
 			memmove(kmap(*++dst_page),
 				kmap(*++src_page), l);
 			kunmap(*src_page);
@@ -386,9 +386,8 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		pr_err("request for non-existent node "
-				"%d in B*Tree\n",
-			cnid);
+		pr_err("request for non-existent node %d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -409,9 +408,8 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		pr_err("request for non-existent node "
-				"%d in B*Tree\n",
-			cnid);
+		pr_err("request for non-existent node %d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -602,7 +600,7 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	       min_t(int, PAGE_CACHE_SIZE, tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
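
The pr_err() hunks in hfs_bnode_findhash() and __hfs_bnode_create() are a separate cleanup: the kernel coding style says never to break user-visible strings across source lines, precisely so that a message seen in the log can be grepped back to its origin. Before and after, taken from the diff above:

/* Before: grepping the tree for "non-existent node %d in B*Tree"
 * finds nothing, because the literal is split across source lines. */
pr_err("request for non-existent node "
		"%d in B*Tree\n",
	cnid);

/* After: one literal, one grep hit (and one source line saved). */
pr_err("request for non-existent node %d in B*Tree\n",
	cnid);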