diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-24 11:32:11 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-24 11:32:11 -0400 |
commit | 6c5daf012c9155aafd2c7973e4278766c30dfad0 (patch) | |
tree | 33959d7b36d03e1610615641a2940cb2de5e8603 | |
parent | 6d39b27f0ac7e805ae3bd9efa51d7da04bec0360 (diff) | |
parent | c08d3b0e33edce28e9cfa7b64f7fe5bdeeb29248 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
truncate: use new helpers
truncate: new helpers
fs: fix overflow in sys_mount() for in-kernel calls
fs: Make unload_nls() NULL pointer safe
freeze_bdev: grab active reference to frozen superblocks
freeze_bdev: kill bd_mount_sem
exofs: remove BKL from super operations
fs/romfs: correct error-handling code
vfs: seq_file: add helpers for data filling
vfs: remove redundant position check in do_sendfile
vfs: change sb->s_maxbytes to a loff_t
vfs: explicitly cast s_maxbytes in fiemap_check_ranges
libfs: return error code on failed attr set
seq_file: return a negative error code when seq_path_root() fails.
vfs: optimize touch_time() too
vfs: optimization for touch_atime()
vfs: split generic_forget_inode() so that hugetlbfs does not have to copy it
fs/inode.c: add dev-id and inode number for debugging in init_special_inode()
libfs: make simple_read_from_buffer conventional
41 files changed, 532 insertions, 533 deletions
diff --git a/Documentation/vm/locking b/Documentation/vm/locking index f366fa956179..25fadb448760 100644 --- a/Documentation/vm/locking +++ b/Documentation/vm/locking | |||
@@ -80,7 +80,7 @@ Note: PTL can also be used to guarantee that no new clones using the | |||
80 | mm start up ... this is a loose form of stability on mm_users. For | 80 | mm start up ... this is a loose form of stability on mm_users. For |
81 | example, it is used in copy_mm to protect against a racing tlb_gather_mmu | 81 | example, it is used in copy_mm to protect against a racing tlb_gather_mmu |
82 | single address space optimization, so that the zap_page_range (from | 82 | single address space optimization, so that the zap_page_range (from |
83 | vmtruncate) does not lose sending ipi's to cloned threads that might | 83 | truncate) does not lose sending ipi's to cloned threads that might |
84 | be spawned underneath it and go to user mode to drag in pte's into tlbs. | 84 | be spawned underneath it and go to user mode to drag in pte's into tlbs. |
85 | 85 | ||
86 | swap_lock | 86 | swap_lock |
@@ -18,7 +18,7 @@ | |||
18 | /* Taken over from the old code... */ | 18 | /* Taken over from the old code... */ |
19 | 19 | ||
20 | /* POSIX UID/GID verification for setting inode attributes. */ | 20 | /* POSIX UID/GID verification for setting inode attributes. */ |
21 | int inode_change_ok(struct inode *inode, struct iattr *attr) | 21 | int inode_change_ok(const struct inode *inode, struct iattr *attr) |
22 | { | 22 | { |
23 | int retval = -EPERM; | 23 | int retval = -EPERM; |
24 | unsigned int ia_valid = attr->ia_valid; | 24 | unsigned int ia_valid = attr->ia_valid; |
@@ -60,9 +60,51 @@ fine: | |||
60 | error: | 60 | error: |
61 | return retval; | 61 | return retval; |
62 | } | 62 | } |
63 | |||
64 | EXPORT_SYMBOL(inode_change_ok); | 63 | EXPORT_SYMBOL(inode_change_ok); |
65 | 64 | ||
65 | /** | ||
66 | * inode_newsize_ok - may this inode be truncated to a given size | ||
67 | * @inode: the inode to be truncated | ||
68 | * @offset: the new size to assign to the inode | ||
69 | * @Returns: 0 on success, -ve errno on failure | ||
70 | * | ||
71 | * inode_newsize_ok will check filesystem limits and ulimits to check that the | ||
72 | * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ | ||
73 | * when necessary. Caller must not proceed with inode size change if failure is | ||
74 | * returned. @inode must be a file (not directory), with appropriate | ||
75 | * permissions to allow truncate (inode_newsize_ok does NOT check these | ||
76 | * conditions). | ||
77 | * | ||
78 | * inode_newsize_ok must be called with i_mutex held. | ||
79 | */ | ||
80 | int inode_newsize_ok(const struct inode *inode, loff_t offset) | ||
81 | { | ||
82 | if (inode->i_size < offset) { | ||
83 | unsigned long limit; | ||
84 | |||
85 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | ||
86 | if (limit != RLIM_INFINITY && offset > limit) | ||
87 | goto out_sig; | ||
88 | if (offset > inode->i_sb->s_maxbytes) | ||
89 | goto out_big; | ||
90 | } else { | ||
91 | /* | ||
92 | * truncation of in-use swapfiles is disallowed - it would | ||
93 | * cause subsequent swapout to scribble on the now-freed | ||
94 | * blocks. | ||
95 | */ | ||
96 | if (IS_SWAPFILE(inode)) | ||
97 | return -ETXTBSY; | ||
98 | } | ||
99 | |||
100 | return 0; | ||
101 | out_sig: | ||
102 | send_sig(SIGXFSZ, current, 0); | ||
103 | out_big: | ||
104 | return -EFBIG; | ||
105 | } | ||
106 | EXPORT_SYMBOL(inode_newsize_ok); | ||
107 | |||
66 | int inode_setattr(struct inode * inode, struct iattr * attr) | 108 | int inode_setattr(struct inode * inode, struct iattr * attr) |
67 | { | 109 | { |
68 | unsigned int ia_valid = attr->ia_valid; | 110 | unsigned int ia_valid = attr->ia_valid; |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index dd376c124e71..33baf27fac78 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -737,12 +737,7 @@ befs_put_super(struct super_block *sb) | |||
737 | { | 737 | { |
738 | kfree(BEFS_SB(sb)->mount_opts.iocharset); | 738 | kfree(BEFS_SB(sb)->mount_opts.iocharset); |
739 | BEFS_SB(sb)->mount_opts.iocharset = NULL; | 739 | BEFS_SB(sb)->mount_opts.iocharset = NULL; |
740 | 740 | unload_nls(BEFS_SB(sb)->nls); | |
741 | if (BEFS_SB(sb)->nls) { | ||
742 | unload_nls(BEFS_SB(sb)->nls); | ||
743 | BEFS_SB(sb)->nls = NULL; | ||
744 | } | ||
745 | |||
746 | kfree(sb->s_fs_info); | 741 | kfree(sb->s_fs_info); |
747 | sb->s_fs_info = NULL; | 742 | sb->s_fs_info = NULL; |
748 | } | 743 | } |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 5d1ed50bd46c..9cf4b926f8e4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -216,8 +216,6 @@ EXPORT_SYMBOL(fsync_bdev); | |||
216 | * freeze_bdev -- lock a filesystem and force it into a consistent state | 216 | * freeze_bdev -- lock a filesystem and force it into a consistent state |
217 | * @bdev: blockdevice to lock | 217 | * @bdev: blockdevice to lock |
218 | * | 218 | * |
219 | * This takes the block device bd_mount_sem to make sure no new mounts | ||
220 | * happen on bdev until thaw_bdev() is called. | ||
221 | * If a superblock is found on this device, we take the s_umount semaphore | 219 | * If a superblock is found on this device, we take the s_umount semaphore |
222 | * on it to make sure nobody unmounts until the snapshot creation is done. | 220 | * on it to make sure nobody unmounts until the snapshot creation is done. |
223 | * The reference counter (bd_fsfreeze_count) guarantees that only the last | 221 | * The reference counter (bd_fsfreeze_count) guarantees that only the last |
@@ -232,46 +230,55 @@ struct super_block *freeze_bdev(struct block_device *bdev) | |||
232 | int error = 0; | 230 | int error = 0; |
233 | 231 | ||
234 | mutex_lock(&bdev->bd_fsfreeze_mutex); | 232 | mutex_lock(&bdev->bd_fsfreeze_mutex); |
235 | if (bdev->bd_fsfreeze_count > 0) { | 233 | if (++bdev->bd_fsfreeze_count > 1) { |
236 | bdev->bd_fsfreeze_count++; | 234 | /* |
235 | * We don't even need to grab a reference - the first call | ||
236 | * to freeze_bdev grab an active reference and only the last | ||
237 | * thaw_bdev drops it. | ||
238 | */ | ||
237 | sb = get_super(bdev); | 239 | sb = get_super(bdev); |
240 | drop_super(sb); | ||
238 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 241 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
239 | return sb; | 242 | return sb; |
240 | } | 243 | } |
241 | bdev->bd_fsfreeze_count++; | 244 | |
242 | 245 | sb = get_active_super(bdev); | |
243 | down(&bdev->bd_mount_sem); | 246 | if (!sb) |
244 | sb = get_super(bdev); | 247 | goto out; |
245 | if (sb && !(sb->s_flags & MS_RDONLY)) { | 248 | if (sb->s_flags & MS_RDONLY) { |
246 | sb->s_frozen = SB_FREEZE_WRITE; | 249 | deactivate_locked_super(sb); |
247 | smp_wmb(); | 250 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
248 | 251 | return sb; | |
249 | sync_filesystem(sb); | 252 | } |
250 | 253 | ||
251 | sb->s_frozen = SB_FREEZE_TRANS; | 254 | sb->s_frozen = SB_FREEZE_WRITE; |
252 | smp_wmb(); | 255 | smp_wmb(); |
253 | 256 | ||
254 | sync_blockdev(sb->s_bdev); | 257 | sync_filesystem(sb); |
255 | 258 | ||
256 | if (sb->s_op->freeze_fs) { | 259 | sb->s_frozen = SB_FREEZE_TRANS; |
257 | error = sb->s_op->freeze_fs(sb); | 260 | smp_wmb(); |
258 | if (error) { | 261 | |
259 | printk(KERN_ERR | 262 | sync_blockdev(sb->s_bdev); |
260 | "VFS:Filesystem freeze failed\n"); | 263 | |
261 | sb->s_frozen = SB_UNFROZEN; | 264 | if (sb->s_op->freeze_fs) { |
262 | drop_super(sb); | 265 | error = sb->s_op->freeze_fs(sb); |
263 | up(&bdev->bd_mount_sem); | 266 | if (error) { |
264 | bdev->bd_fsfreeze_count--; | 267 | printk(KERN_ERR |
265 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 268 | "VFS:Filesystem freeze failed\n"); |
266 | return ERR_PTR(error); | 269 | sb->s_frozen = SB_UNFROZEN; |
267 | } | 270 | deactivate_locked_super(sb); |
271 | bdev->bd_fsfreeze_count--; | ||
272 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | ||
273 | return ERR_PTR(error); | ||
268 | } | 274 | } |
269 | } | 275 | } |
276 | up_write(&sb->s_umount); | ||
270 | 277 | ||
278 | out: | ||
271 | sync_blockdev(bdev); | 279 | sync_blockdev(bdev); |
272 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 280 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
273 | 281 | return sb; /* thaw_bdev releases s->s_umount */ | |
274 | return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */ | ||
275 | } | 282 | } |
276 | EXPORT_SYMBOL(freeze_bdev); | 283 | EXPORT_SYMBOL(freeze_bdev); |
277 | 284 | ||
@@ -284,44 +291,44 @@ EXPORT_SYMBOL(freeze_bdev); | |||
284 | */ | 291 | */ |
285 | int thaw_bdev(struct block_device *bdev, struct super_block *sb) | 292 | int thaw_bdev(struct block_device *bdev, struct super_block *sb) |
286 | { | 293 | { |
287 | int error = 0; | 294 | int error = -EINVAL; |
288 | 295 | ||
289 | mutex_lock(&bdev->bd_fsfreeze_mutex); | 296 | mutex_lock(&bdev->bd_fsfreeze_mutex); |
290 | if (!bdev->bd_fsfreeze_count) { | 297 | if (!bdev->bd_fsfreeze_count) |
291 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 298 | goto out_unlock; |
292 | return -EINVAL; | 299 | |
293 | } | 300 | error = 0; |
294 | 301 | if (--bdev->bd_fsfreeze_count > 0) | |
295 | bdev->bd_fsfreeze_count--; | 302 | goto out_unlock; |
296 | if (bdev->bd_fsfreeze_count > 0) { | 303 | |
297 | if (sb) | 304 | if (!sb) |
298 | drop_super(sb); | 305 | goto out_unlock; |
299 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 306 | |
300 | return 0; | 307 | BUG_ON(sb->s_bdev != bdev); |
301 | } | 308 | down_write(&sb->s_umount); |
302 | 309 | if (sb->s_flags & MS_RDONLY) | |
303 | if (sb) { | 310 | goto out_deactivate; |
304 | BUG_ON(sb->s_bdev != bdev); | 311 | |
305 | if (!(sb->s_flags & MS_RDONLY)) { | 312 | if (sb->s_op->unfreeze_fs) { |
306 | if (sb->s_op->unfreeze_fs) { | 313 | error = sb->s_op->unfreeze_fs(sb); |
307 | error = sb->s_op->unfreeze_fs(sb); | 314 | if (error) { |
308 | if (error) { | 315 | printk(KERN_ERR |
309 | printk(KERN_ERR | 316 | "VFS:Filesystem thaw failed\n"); |
310 | "VFS:Filesystem thaw failed\n"); | 317 | sb->s_frozen = SB_FREEZE_TRANS; |
311 | sb->s_frozen = SB_FREEZE_TRANS; | 318 | bdev->bd_fsfreeze_count++; |
312 | bdev->bd_fsfreeze_count++; | 319 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
313 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 320 | return error; |
314 | return error; | ||
315 | } | ||
316 | } | ||
317 | sb->s_frozen = SB_UNFROZEN; | ||
318 | smp_wmb(); | ||
319 | wake_up(&sb->s_wait_unfrozen); | ||
320 | } | 321 | } |
321 | drop_super(sb); | ||
322 | } | 322 | } |
323 | 323 | ||
324 | up(&bdev->bd_mount_sem); | 324 | sb->s_frozen = SB_UNFROZEN; |
325 | smp_wmb(); | ||
326 | wake_up(&sb->s_wait_unfrozen); | ||
327 | |||
328 | out_deactivate: | ||
329 | if (sb) | ||
330 | deactivate_locked_super(sb); | ||
331 | out_unlock: | ||
325 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | 332 | mutex_unlock(&bdev->bd_fsfreeze_mutex); |
326 | return 0; | 333 | return 0; |
327 | } | 334 | } |
@@ -430,7 +437,6 @@ static void init_once(void *foo) | |||
430 | 437 | ||
431 | memset(bdev, 0, sizeof(*bdev)); | 438 | memset(bdev, 0, sizeof(*bdev)); |
432 | mutex_init(&bdev->bd_mutex); | 439 | mutex_init(&bdev->bd_mutex); |
433 | sema_init(&bdev->bd_mount_sem, 1); | ||
434 | INIT_LIST_HEAD(&bdev->bd_inodes); | 440 | INIT_LIST_HEAD(&bdev->bd_inodes); |
435 | INIT_LIST_HEAD(&bdev->bd_list); | 441 | INIT_LIST_HEAD(&bdev->bd_list); |
436 | #ifdef CONFIG_SYSFS | 442 | #ifdef CONFIG_SYSFS |
diff --git a/fs/buffer.c b/fs/buffer.c index 209f7f15f5f8..24afd7422ae8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -2239,16 +2239,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) | |||
2239 | struct address_space *mapping = inode->i_mapping; | 2239 | struct address_space *mapping = inode->i_mapping; |
2240 | struct page *page; | 2240 | struct page *page; |
card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); card->cid.serial = UNSTUFF_BITS(resp, 16, 24); card->cid.month = UNSTUFF_BITS(resp, 12, 4); card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; break; case 2: /* MMC v2.0 - v2.2 */ case 3: /* MMC v3.1 - v3.3 */ case 4: /* MMC v4 */ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); card->cid.serial = UNSTUFF_BITS(resp, 16, 32); card->cid.month = UNSTUFF_BITS(resp, 12, 4); card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; break; default: printk("%s: card has unknown MMCA version %d\n", mmc_hostname(card->host), card->csd.mmca_vsn); mmc_card_set_bad(card); break; } } } /* * Given a 128-bit response, decode to our card CSD structure. 
*/ static void mmc_decode_csd(struct mmc_card *card) { struct mmc_csd *csd = &card->csd; unsigned int e, m, csd_struct; u32 *resp = card->raw_csd; if (mmc_card_sd(card)) { csd_struct = UNSTUFF_BITS(resp, 126, 2); if (csd_struct != 0) { printk("%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd_struct); mmc_card_set_bad(card); return; } m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); e = UNSTUFF_BITS(resp, 47, 3); m = UNSTUFF_BITS(resp, 62, 12); csd->capacity = (1 + m) << (e + 2); csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); csd->read_partial = UNSTUFF_BITS(resp, 79, 1); csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_partial = UNSTUFF_BITS(resp, 21, 1); } else { /* * We only understand CSD structure v1.1 and v1.2. * v1.2 has extra information in bits 15, 11 and 10. 
*/ csd_struct = UNSTUFF_BITS(resp, 126, 2); if (csd_struct != 1 && csd_struct != 2) { printk("%s: unrecognised CSD structure version %d\n", mmc_hostname(card->host), csd_struct); mmc_card_set_bad(card); return; } csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); m = UNSTUFF_BITS(resp, 115, 4); e = UNSTUFF_BITS(resp, 112, 3); csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; m = UNSTUFF_BITS(resp, 99, 4); e = UNSTUFF_BITS(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); e = UNSTUFF_BITS(resp, 47, 3); m = UNSTUFF_BITS(resp, 62, 12); csd->capacity = (1 + m) << (e + 2); csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); csd->read_partial = UNSTUFF_BITS(resp, 79, 1); csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_partial = UNSTUFF_BITS(resp, 21, 1); } } /* * Given a 64-bit response, decode to our card SCR structure. */ static void mmc_decode_scr(struct mmc_card *card) { struct sd_scr *scr = &card->scr; unsigned int scr_struct; u32 resp[4]; BUG_ON(!mmc_card_sd(card)); resp[3] = card->raw_scr[1]; resp[2] = card->raw_scr[0]; scr_struct = UNSTUFF_BITS(resp, 60, 4); if (scr_struct != 0) { printk("%s: unrecognised SCR structure version %d\n", mmc_hostname(card->host), scr_struct); mmc_card_set_bad(card); return; } scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); } /* * Locate a MMC card on this MMC host given a raw CID. */ static struct mmc_card *mmc_find_card(struct mmc_host *host, u32 *raw_cid) { struct mmc_card *card; list_for_each_entry(card, &host->cards, node) { if (memcmp(card->raw_cid, raw_cid, sizeof(card->raw_cid)) == 0) return card; } return NULL; } /* * Allocate a new MMC card, and assign a unique RCA. 
*/
static struct mmc_card *
mmc_alloc_card(struct mmc_host *host, u32 *raw_cid, unsigned int *frca)
{
	struct mmc_card *card, *c;
	unsigned int rca = *frca;

	card = kmalloc(sizeof(struct mmc_card), GFP_KERNEL);
	if (!card)
		return ERR_PTR(-ENOMEM);

	mmc_init_card(card, host);
	memcpy(card->raw_cid, raw_cid, sizeof(card->raw_cid));

	/*
	 * Bump the candidate RCA until it collides with no card already
	 * on this host's list, restarting the scan after each bump.
	 */
again:
	list_for_each_entry(c, &host->cards, node)
		if (c->rca == rca) {
			rca++;
			goto again;
		}

	card->rca = rca;
	/* Hand the chosen RCA back so the caller can continue from it. */
	*frca = rca;

	return card;
}

/*
 * Tell attached cards to go to IDLE state
 * (MMC_GO_IDLE_STATE broadcast with chip select raised around it).
 */
static void mmc_idle_cards(struct mmc_host *host)
{
	struct mmc_command cmd;

	/* Raise chip select while issuing the command... */
	host->ios.chip_select = MMC_CS_HIGH;
	host->ops->set_ios(host, &host->ios);

	mmc_delay(1);

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_BC;

	mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	/* ...then return it to don't-care. */
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ops->set_ios(host, &host->ios);

	mmc_delay(1);
}

/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
*/
static void mmc_power_up(struct mmc_host *host)
{
	/* Highest set bit of the host's advertised OCR voltage mask. */
	int bit = fls(host->ocr_avail) - 1;

	/* Stage one: power up with the clock still off. */
	host->ios.vdd = bit;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ops->set_ios(host, &host->ios);

	mmc_delay(1);

	/* Stage two: enable the clock at the host's minimum rate. */
	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	host->ops->set_ios(host, &host->ios);

	mmc_delay(2);
}

/* Remove power from the bus: clock, voltage and power mode all off. */
static void mmc_power_off(struct mmc_host *host)
{
	host->ios.clock = 0;
	host->ios.vdd = 0;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ops->set_ios(host, &host->ios);
}

/*
 * Poll MMC_SEND_OP_COND until the card reports ready (MMC_CARD_BUSY
 * set in the response), up to 100 tries 10ms apart.  With ocr == 0
 * this is a single query of the card's OCR.  The last response word
 * is returned through @rocr when non-NULL.
 */
static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd;
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = ocr;
	cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err != MMC_ERR_NONE)
			break;

		/* Card ready, or we only wanted to read the OCR. */
		if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
			break;

		/* Report a timeout if the loop runs out of retries. */
		err = MMC_ERR_TIMEOUT;

		mmc_delay(10);
	}

	if (rocr)
		*rocr = cmd.resp[0];

	return err;
}

/*
 * SD counterpart of mmc_send_op_cond(): poll SD_APP_OP_COND (as an
 * application command) with the same retry and timeout behaviour.
 */
static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd;
	int i, err = 0;

	cmd.opcode = SD_APP_OP_COND;
	cmd.arg = ocr;
	cmd.flags = MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES);
		if (err != MMC_ERR_NONE)
			break;

		if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
			break;

		err = MMC_ERR_TIMEOUT;

		mmc_delay(10);
	}

	if (rocr)
		*rocr = cmd.resp[0];

	return err;
}

/*
 * Discover cards by requesting their CID.  If this command
 * times out, it is not an error; there are no further cards
 * to be discovered.  Add new cards to the list.
 *
 * Create a mmc_card entry for each discovered card, assigning
 * it an RCA, and save the raw CID for decoding later.
*/
static void mmc_discover_cards(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int first_rca = 1, err;

	while (1) {
		struct mmc_command cmd;

		cmd.opcode = MMC_ALL_SEND_CID;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
		if (err == MMC_ERR_TIMEOUT) {
			/* Timeout: no more cards answered -- normal exit. */
			err = MMC_ERR_NONE;
			break;
		}
		if (err != MMC_ERR_NONE) {
			printk(KERN_ERR "%s: error requesting CID: %d\n",
				mmc_hostname(host), err);
			break;
		}

		/* Reuse an existing entry when this CID is already known. */
		card = mmc_find_card(host, cmd.resp);
		if (!card) {
			card = mmc_alloc_card(host, cmd.resp, &first_rca);
			if (IS_ERR(card)) {
				err = PTR_ERR(card);
				break;
			}
			list_add(&card->node, &host->cards);
		}

		/* The card answered, so it is no longer considered dead. */
		card->state &= ~MMC_STATE_DEAD;

		if (host->mode == MMC_MODE_SD) {
			mmc_card_set_sd(card);

			/* SD cards publish their own RCA in the response. */
			cmd.opcode = SD_SEND_RELATIVE_ADDR;
			cmd.arg = 0;
			cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;

			err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
			if (err != MMC_ERR_NONE)
				mmc_card_set_dead(card);
			else {
				card->rca = cmd.resp[0] >> 16;

				if (!host->ops->get_ro) {
					printk(KERN_WARNING "%s: host does not "
						"support reading read-only "
						"switch. assuming write-enable.\n",
						mmc_hostname(host));
				} else {
					if (host->ops->get_ro(host))
						mmc_card_set_readonly(card);
				}
			}
		} else {
			/* MMC cards are assigned the RCA we allocated. */
			cmd.opcode = MMC_SET_RELATIVE_ADDR;
			cmd.arg = card->rca << 16;
			cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

			err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
			if (err != MMC_ERR_NONE)
				mmc_card_set_dead(card);
		}
	}
}

/*
 * Fetch and decode the CSD (and then the CID) for every card that is
 * neither dead nor already registered; cards that fail are marked
 * dead.
 */
static void mmc_read_csds(struct mmc_host *host)
{
	struct mmc_card *card;

	list_for_each_entry(card, &host->cards, node) {
		struct mmc_command cmd;
		int err;

		/* Skip cards already given up on or already registered. */
		if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
			continue;

		cmd.opcode = MMC_SEND_CSD;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
		if (err != MMC_ERR_NONE) {
			mmc_card_set_dead(card);
			continue;
		}

		memcpy(card->raw_csd, cmd.resp, sizeof(card->raw_csd));

		mmc_decode_csd(card);
		mmc_decode_cid(card);
	}
}

/*
 * Read the SCR from every new SD card.  Unlike the CSD, the SCR
 * arrives as a data transfer, so a full request with a one-block,
 * 8-byte read is built.  Any failure marks the card dead.
 */
static void mmc_read_scrs(struct mmc_host *host)
{
	int err;
	struct mmc_card *card;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;

	struct scatterlist sg;

	list_for_each_entry(card, &host->cards, node) {
		if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
			continue;
		if (!mmc_card_sd(card))
			continue;

		err = mmc_select_card(host, card);
		if (err != MMC_ERR_NONE) {
			mmc_card_set_dead(card);
			continue;
		}

		/* SEND_SCR is an application command: announce it first. */
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_APP_CMD;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) {
			mmc_card_set_dead(card);
			continue;
		}

		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = SD_APP_SEND_SCR;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		memset(&data, 0, sizeof(struct mmc_data));

		/* Single 8-byte (2^3) block read into raw_scr. */
		data.timeout_ns = card->csd.tacc_ns * 10;
		data.timeout_clks = card->csd.tacc_clks * 10;
		data.blksz_bits = 3;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;

		memset(&mrq, 0, sizeof(struct mmc_request));

		mrq.cmd = &cmd;
		mrq.data = &data;

		sg_init_one(&sg, (u8*)card->raw_scr, 8);

		mmc_wait_for_req(host, &mrq);

		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
			mmc_card_set_dead(card);
			continue;
		}

		/* Convert the big-endian SCR words to host byte order. */
		card->raw_scr[0] = ntohl(card->raw_scr[0]);
		card->raw_scr[1] = ntohl(card->raw_scr[1]);

		mmc_decode_scr(card);
	}

	mmc_deselect_cards(host);
}

/*
 * Pick the fastest transfer rate supported by both the host and
 * every live card currently on the bus.
 */
static unsigned int mmc_calculate_clock(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int max_dtr = host->f_max;

	list_for_each_entry(card, &host->cards, node)
		if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
			max_dtr = card->csd.max_dtr;

	DBG("MMC: selected %d.%03dMHz transfer rate\n",
		max_dtr / 1000000, (max_dtr / 1000) % 1000);

	return max_dtr;
}

/*
 * Check whether cards we already know about are still present.
 * We do this by requesting status, and checking whether a card
 * responds.
 *
 * A request for status does not cause a state change in data
 * transfer mode.
 */
static void mmc_check_cards(struct mmc_host *host)
{
	struct list_head *l, *n;

	mmc_deselect_cards(host);

	list_for_each_safe(l, n, &host->cards) {
		struct mmc_card *card = mmc_list_to_card(l);
		struct mmc_command cmd;
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
		if (err == MMC_ERR_NONE)
			continue;

		/* No response: flag it so mmc_rescan() removes it. */
		mmc_card_set_dead(card);
	}
}

/*
 * Power up the bus if needed, negotiate the operating voltage, and
 * run the discovery/initialisation sequence (SD first, then MMC).
 */
static void mmc_setup(struct mmc_host *host)
{
	if (host->ios.power_mode != MMC_POWER_ON) {
		int err;
		u32 ocr;

		host->mode = MMC_MODE_SD;

		mmc_power_up(host);
		mmc_idle_cards(host);

		err = mmc_send_app_op_cond(host, 0, &ocr);

		/*
		 * If we fail to detect any SD cards then try
		 * searching for MMC cards.
		 */
		if (err != MMC_ERR_NONE) {
			host->mode = MMC_MODE_MMC;

			err = mmc_send_op_cond(host, 0, &ocr);
			if (err != MMC_ERR_NONE)
				return;
		}

		host->ocr = mmc_select_voltage(host, ocr);

		/*
		 * Since we're changing the OCR value, we seem to
		 * need to tell some cards to go back to the idle
		 * state.  We wait 1ms to give cards time to
		 * respond.
		 */
		if (host->ocr)
			mmc_idle_cards(host);
	} else {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.clock = host->f_min;
		host->ops->set_ios(host, &host->ios);

		/*
		 * We should remember the OCR mask from the existing
		 * cards, and detect the new cards OCR mask, combine
		 * the two and re-select the VDD.  However, if we do
		 * change VDD, we should do an idle, and then do a
		 * full re-initialisation.  We would need to notify
		 * drivers so that they can re-setup the cards as
		 * well, while keeping their queues at bay.
		 *
		 * For the moment, we take the easy way out - if the
		 * new cards don't like our currently selected VDD,
		 * they drop off the bus.
		 */
	}

	/* No usable voltage was negotiated: nothing more to do. */
	if (host->ocr == 0)
		return;

	/*
	 * Send the selected OCR multiple times... until the cards
	 * all get the idea that they should be ready for CMD2.
	 * (My SanDisk card seems to need this.)
	 */
	if (host->mode == MMC_MODE_SD)
		mmc_send_app_op_cond(host, host->ocr, NULL);
	else
		mmc_send_op_cond(host, host->ocr, NULL);

	mmc_discover_cards(host);

	/*
	 * Ok, now switch to push-pull mode.
	 */
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ops->set_ios(host, &host->ios);

	mmc_read_csds(host);

	if (host->mode == MMC_MODE_SD)
		mmc_read_scrs(host);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	All we know is that card(s) have been inserted or removed
 *	from the socket(s).  We don't know which socket or cards.
 *	Schedules the host's detect work (mmc_rescan), optionally
 *	delayed.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	if (delay)
		schedule_delayed_work(&host->detect, delay);
	else
		schedule_work(&host->detect);
}

EXPORT_SYMBOL(mmc_detect_change);

/*
 * Work function behind mmc_detect_change(): re-probe the bus,
 * register newly found good cards and destroy dead ones.  Powers
 * the bus down when no cards remain.
 */
static void mmc_rescan(void *data)
{
	struct mmc_host *host = data;
	struct list_head *l, *n;

	mmc_claim_host(host);

	/* Only re-validate known cards when the bus is already up. */
	if (host->ios.power_mode == MMC_POWER_ON)
		mmc_check_cards(host);

	mmc_setup(host);

	if (!list_empty(&host->cards)) {
		/*
		 * (Re-)calculate the fastest clock rate which the
		 * attached cards and the host support.
		 */
		host->ios.clock = mmc_calculate_clock(host);
		host->ops->set_ios(host, &host->ios);
	}

	mmc_release_host(host);

	list_for_each_safe(l, n, &host->cards) {
		struct mmc_card *card = mmc_list_to_card(l);

		/*
		 * If this is a new and good card, register it.
		 */
		if (!mmc_card_present(card) && !mmc_card_dead(card)) {
			if (mmc_register_card(card))
				mmc_card_set_dead(card);
			else
				mmc_card_set_present(card);
		}

		/*
		 * If this card is dead, destroy it.
		 */
		if (mmc_card_dead(card)) {
			list_del(&card->node);
			mmc_remove_card(card);
		}
	}

	/*
	 * If we discover that there are no cards on the
	 * bus, turn off the clock and power down.
	 */
	if (list_empty(&host->cards))
		mmc_power_off(host);
}

/**
 *	mmc_alloc_host - initialise the per-host structure.
 *	@extra: sizeof private data structure
 *	@dev: pointer to host device model structure
 *
 *	Initialise the per-host structure.  Returns NULL when the
 *	underlying sysfs allocation fails.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	struct mmc_host *host;

	host = mmc_alloc_host_sysfs(extra, dev);
	if (host) {
		spin_lock_init(&host->lock);
		init_waitqueue_head(&host->wq);
		INIT_LIST_HEAD(&host->cards);
		INIT_WORK(&host->detect, mmc_rescan, host);

		/*
		 * By default, hosts do not support SGIO or large requests.
		 * They have to set these according to their abilities.
		 */
		host->max_hw_segs = 1;
		host->max_phys_segs = 1;
		host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		host->max_seg_size = PAGE_CACHE_SIZE;
	}

	return host;
}

EXPORT_SYMBOL(mmc_alloc_host);

/**
 *	mmc_add_host - initialise host hardware
 *	@host: mmc host
 *
 *	Registers the host with sysfs, then powers the bus down and
 *	kicks off an initial card detection pass.
 */
int mmc_add_host(struct mmc_host *host)
{
	int ret;

	ret = mmc_add_host_sysfs(host);
	if (ret == 0) {
		mmc_power_off(host);
		mmc_detect_change(host, 0);
	}

	return ret;
}

EXPORT_SYMBOL(mmc_add_host);

/**
 *	mmc_remove_host - remove host hardware
 *	@host: mmc host
 *
 *	Unregister and remove all cards associated with this host,
 *	and power down the MMC bus.
*/
void mmc_remove_host(struct mmc_host *host)
{
	struct list_head *l, *n;

	list_for_each_safe(l, n, &host->cards) {
		struct mmc_card *card = mmc_list_to_card(l);

		mmc_remove_card(card);
	}

	mmc_power_off(host);
	mmc_remove_host_sysfs(host);
}

EXPORT_SYMBOL(mmc_remove_host);

/**
 *	mmc_free_host - free the host structure
 *	@host: mmc host
 *
 *	Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	/* Ensure no detect work for this host is still pending. */
	flush_scheduled_work();
	mmc_free_host_sysfs(host);
}

EXPORT_SYMBOL(mmc_free_host);

#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 *	@state: suspend mode (PM_SUSPEND_xxx)
 *
 *	Deselects all cards and powers the bus off.
 */
int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
{
	mmc_claim_host(host);
	mmc_deselect_cards(host);
	mmc_power_off(host);
	mmc_release_host(host);

	return 0;
}

EXPORT_SYMBOL(mmc_suspend_host);

/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 *
 *	Runs the rescan sequence synchronously to bring the bus and
 *	cards back up.
 */
int mmc_resume_host(struct mmc_host *host)
{
	mmc_rescan(host);

	return 0;
}

EXPORT_SYMBOL(mmc_resume_host);

#endif

MODULE_LICENSE("GPL");