/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}

static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
	int err;
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		err = percpu_counter_init(&s->s_writers.counter[i], 0);
		if (err < 0)
			goto err_out;
		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
				 &type->s_writers_key[i], 0);
	}
	init_waitqueue_head(&s->s_writers.wait);
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	return 0;
err_out:
	while (--i >= 0)
		percpu_counter_destroy(&s->s_writers.counter[i]);
	return err;
}

static void destroy_sb_writers(struct super_block *s)
{
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_counter_destroy(&s->s_writers.counter[i]);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
	struct super_block *s = kzalloc(sizeof(struct super_block),  GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			/*
			 * We cannot call security_sb_free() without
			 * security_sb_alloc() succeeding. So bail out manually
			 */
			kfree(s);
			s = NULL;
			goto out;
		}
#ifdef CONFIG_SMP
		s->s_files = alloc_percpu(struct list_head);
		if (!s->s_files)
			goto err_out;
		else {
			int i;

			for_each_possible_cpu(i)
				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
		}
#else
		INIT_LIST_HEAD(&s->s_files);
#endif
		if (init_sb_writers(s, type))
			goto err_out;
		s->s_flags = flags;
		s->s_bdi = &default_backing_dev_info;
		INIT_HLIST_NODE(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		INIT_LIST_HEAD(&s->s_mounts);
		init_rwsem(&s->s_umount);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.batch = 1024;
	}
out:
	return s;
err_out:
	security_sb_free(s);
#ifdef CONFIG_SMP
	if (s->s_files)
		free_percpu(s->s_files);
#endif
	destroy_sb_writers(s);
	kfree(s);
	s = NULL;
	goto out;
}

/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
	free_percpu(s->s_files);
#endif
	destroy_sb_writers(s);
	security_sb_free(s);
	WARN_ON(!list_empty(&s->s_mounts));
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree(s);
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker */
		unregister_shrinker(&s->s_shrink);
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 * 	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we failed (superblock contents were already dead or
 *	dying when grab_super() was called).  Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 *	grab_super_passive - acquire a passive reference
 *	@sb: reference we are trying to grab
 *
 *	Tries to acquire a passive reference. This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it. It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained. On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (hlist_unhashed(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root && (sb->s_flags & MS_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}
		
	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget);
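
/*
 * Example (illustrative sketch; the samplefs_* names are made up, see
 * mount_bdev() and mount_ns() in this file for real callers): sget() is
 * typically given a test callback that recognises an already-mounted
 * superblock by some key and a set callback that records that key in a
 * freshly allocated one:
 *
 *	static int samplefs_test_super(struct super_block *sb, void *data)
 *	{
 *		return sb->s_fs_info == data;
 *	}
 *
 *	static int samplefs_set_super(struct super_block *sb, void *data)
 *	{
 *		sb->s_fs_info = data;
 *		return 0;
 *	}
 *
 *	sb = sget(fs_type, samplefs_test_super, samplefs_set_super, flags, key);
 *	if (IS_ERR(sb))
 *		return ERR_CAST(sb);
 *	if (!sb->s_root) {
 *		err = samplefs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
 *		if (err) {
 *			deactivate_locked_super(sb);
 *			return ERR_PTR(err);
 *		}
 *		sb->s_flags |= MS_ACTIVE;
 *	}
 */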

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
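
/*
 * Example (sketch of a typical caller; this is roughly what sys_sync() does
 * via fs/sync.c): the callback sees each live superblock with s_umount held
 * shared, so it may sleep but must not try to take s_umount again:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			__sync_filesystem(sb, *(int *)arg);
 *	}
 *
 *	int wait = 0;
 *	iterate_supers(sync_one_sb, &wait);
 */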

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *	
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

/**
 *	get_super_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen). %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	while (1) {
		struct super_block *s = get_super(bdev);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		up_read(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
 
struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev ==  dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *      @force: whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force) {
			mark_files_ro(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
		    !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if (dev == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
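
/*
 * Example (hypothetical filesystem, not part of this file): the mount_nodev(),
 * mount_single() and mount_bdev() helpers above are normally reached through
 * a struct file_system_type; the samplefs_* names below are made up:
 *
 *	static struct dentry *samplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, samplefs_fill_super);
 *	}
 *
 *	static struct file_system_type samplefs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "samplefs",
 *		.mount		= samplefs_mount,
 *		.kill_sb	= kill_anon_super,
 *	};
 */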

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_counter_dec(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure s_writers are updated before we wake up waiters in
	 * freeze_super().
	 */
	smp_mb();
	if (waitqueue_active(&sb->s_writers.wait))
		wake_up(&sb->s_writers.wait);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
				unsigned long ip)
{
	int i;

	if (!trylock) {
		for (i = 0; i < level - 1; i++)
			if (lock_is_held(&sb->s_writers.lock_map[i])) {
				trylock = true;
				break;
			}
	}
	rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
	if (unlikely(sb->s_writers.frozen >= level)) {
		if (!wait)
			return 0;
		wait_event(sb->s_writers.wait_unfrozen,
			   sb->s_writers.frozen < level);
	}

#ifdef CONFIG_LOCKDEP
	acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
	percpu_counter_inc(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure counter is updated before we check for frozen.
	 * freeze_super() first sets frozen and then checks the counter.
	 */
	smp_mb();
	if (unlikely(sb->s_writers.frozen >= level)) {
		__sb_end_write(sb, level);
		goto retry;
	}
	return 1;
}
EXPORT_SYMBOL(__sb_start_write);
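
/*
 * Example (sketch): filesystem and VFS code is expected to use the
 * sb_start_write()/sb_end_write() style wrappers from <linux/fs.h> rather
 * than calling __sb_start_write() directly.  A typical write path simply
 * brackets the modification and blocks in sb_start_write() while the
 * filesystem is frozen:
 *
 *	sb_start_write(inode->i_sb);
 *	...modify the filesystem...
 *	sb_end_write(inode->i_sb);
 *
 * sb_start_pagefault()/sb_end_pagefault() and sb_start_intwrite()/
 * sb_end_intwrite() follow the same pattern for the page fault and internal
 * freeze levels.
 */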

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	s64 writers;

	/*
	 * We just cycle-through lockdep here so that it does not complain
	 * about returning with lock to userspace
	 */
	rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

	do {
		DEFINE_WAIT(wait);

		/*
		 * We use a barrier in prepare_to_wait() to separate setting
		 * of frozen and checking of the counter
		 */
		prepare_to_wait(&sb->s_writers.wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
		if (writers)
			schedule();

		finish_wait(&sb->s_writers.wait, &wait);
	} while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & MS_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb->s_flags & MS_RDONLY) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	/* From now on, no new normal writers can start */
	sb->s_writers.frozen = SB_FREEZE_WRITE;
	smp_wmb();

	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);

	sb_wait_write(sb, SB_FREEZE_WRITE);

	/* Now we go and block page faults... */
	down_write(&sb->s_umount);
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	smp_wmb();

	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	smp_wmb();
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * This is just for debugging purposes so that fs can warn if it
	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_writers.frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_writers.frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
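
/*
 * Example (approximate sketch; see the FIFREEZE/FITHAW handling in fs/ioctl.c
 * for the real code): userspace drives this pair through ioctls on any open
 * file of the filesystem.  freeze_super() returns -EBUSY if the fs is already
 * frozen; thaw_super() returns -EINVAL if it is not frozen:
 *
 *	struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
 *
 *	error = freeze_super(sb);
 *	...
 *	error = thaw_super(sb);
 */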