/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>

DEFINE_SPINLOCK(swap_lock);
unsigned int nr_swapfiles;
long total_swap_pages;
static int swap_overflow;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

struct swap_list_t swap_list = {-1, -1};

struct swap_info_struct swap_info[MAX_SWAPFILES];

static DECLARE_MUTEX(swapon_sem);

/*
 * We need this because the bdev->unplug_fn can sleep and we cannot
 * hold swap_lock while calling the unplug_fn. And swap_lock
 * cannot be turned into a semaphore.
 */
static DECLARE_RWSEM(swap_unplug_sem);

void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
	swp_entry_t entry;

	down_read(&swap_unplug_sem);
	entry.val = page_private(page);
	if (PageSwapCache(page)) {
		struct block_device *bdev = swap_info[swp_type(entry)].bdev;
		struct backing_dev_info *bdi;

		/*
		 * If the page is removed from swapcache from under us (with a
		 * racy try_to_unuse/swapoff) we need an additional reference
		 * count to avoid reading garbage from page_private(page) above.
		 * If the WARN_ON triggers during a swapoff it may be the race
		 * condition and it's harmless. However if it triggers without
		 * swapoff it signals a problem.
		 */
		WARN_ON(page_count(page) <= 1);

		bdi = bdev->bd_inode->i_mapping->backing_dev_info;
		blk_run_backing_dev(bdi, page);
	}
	up_read(&swap_unplug_sem);
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256
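
/*
 * SWAPFILE_CLUSTER is how many pages scan_swap_map() allocates
 * sequentially before it goes hunting for a fresh cluster;
 * LATENCY_LIMIT bounds how many swap_map slots are examined before
 * offering to reschedule with cond_resched().
 */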

static inline unsigned long scan_swap_map(struct swap_info_struct *si)
{
	unsigned long offset, last_in_cluster;
	int latency_ration = LATENCY_LIMIT;

	/* 
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 */
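
	/*
	 * Note that SWP_SCANNING is added to si->flags rather than or'ed
	 * in, so concurrent scanners each bump the flags word;
	 * sys_swapoff() spins until flags drops below SWP_SCANNING before
	 * tearing the area down.
	 */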

	si->flags += SWP_SCANNING;
	if (unlikely(!si->cluster_nr)) {
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
			goto lowest;
		spin_unlock(&swap_lock);

		offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&swap_lock);
				si->cluster_next = offset-SWAPFILE_CLUSTER-1;
				goto cluster;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}
		spin_lock(&swap_lock);
		goto lowest;
	}

	si->cluster_nr--;
cluster:
	offset = si->cluster_next;
	if (offset > si->highest_bit)
lowest:		offset = si->lowest_bit;
checks:	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (!si->swap_map[offset]) {
		if (offset == si->lowest_bit)
			si->lowest_bit++;
		if (offset == si->highest_bit)
			si->highest_bit--;
		si->inuse_pages++;
		if (si->inuse_pages == si->pages) {
			si->lowest_bit = si->max;
			si->highest_bit = 0;
		}
		si->swap_map[offset] = 1;
		si->cluster_next = offset + 1;
		si->flags -= SWP_SCANNING;
		return offset;
	}

	spin_unlock(&swap_lock);
	while (++offset <= si->highest_bit) {
		if (!si->swap_map[offset]) {
			spin_lock(&swap_lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
		}
	}
	spin_lock(&swap_lock);
	goto lowest;

no_page:
	si->flags -= SWP_SCANNING;
	return 0;
}
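
/*
 * get_swap_page() allocates one swap slot, scanning swap areas in
 * priority order.  swap_list.next rotates round-robin among areas of
 * equal priority; `wrapped' lets the search fall through to the next
 * priority band, and we give up after wrapping twice.
 */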

swp_entry_t get_swap_page(void)
{
	struct swap_info_struct *si;
	pgoff_t offset;
	int type, next;
	int wrapped = 0;

	spin_lock(&swap_lock);
	if (nr_swap_pages <= 0)
		goto noswap;
	nr_swap_pages--;

	for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
		si = swap_info + type;
		next = si->next;
		if (next < 0 ||
		    (!wrapped && si->prio != swap_info[next].prio)) {
			next = swap_list.head;
			wrapped++;
		}

		if (!si->highest_bit)
			continue;
		if (!(si->flags & SWP_WRITEOK))
			continue;

		swap_list.next = next;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		next = swap_list.next;
	}

	nr_swap_pages++;
noswap:
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}
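
/*
 * Allocate a swap slot from one specific swap area (e.g. for the
 * software suspend image), bypassing the priority list above.
 */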

swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si;
	pgoff_t offset;

	spin_lock(&swap_lock);
	si = swap_info + type;
	if (si->flags & SWP_WRITEOK) {
		nr_swap_pages--;
		offset = scan_swap_map(si);
		if (offset) {
			spin_unlock(&swap_lock);
			return swp_entry(type, offset);
		}
		nr_swap_pages++;
	}
	spin_unlock(&swap_lock);
	return (swp_entry_t) {0};
}

static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;

	if (!entry.val)
		goto out;
	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_nofile;
	p = & swap_info[type];
	if (!(p->flags & SWP_USED))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_free;
	spin_lock(&swap_lock);
	return p;

bad_free:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
	goto out;
bad_offset:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}
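
/*
 * swap_map[offset] is a usage count: one per pte referencing the entry,
 * plus one for the swap cache page itself.  Counts stuck at SWAP_MAP_MAX
 * are treated as permanent and only reclaimed by swapoff, while
 * SWAP_MAP_BAD marks slots that must never be allocated.
 */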

static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
	int count = p->swap_map[offset];

	if (count < SWAP_MAP_MAX) {
		count--;
		p->swap_map[offset] = count;
		if (!count) {
			if (offset < p->lowest_bit)
				p->lowest_bit = offset;
			if (offset > p->highest_bit)
				p->highest_bit = offset;
			if (p->prio > swap_info[swap_list.next].prio)
				swap_list.next = p - swap_info;
			nr_swap_pages++;
			p->inuse_pages--;
		}
	}
	return count;
}

/*
 * Caller has made sure that the swapdevice corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct * p;

	p = swap_info_get(entry);
	if (p) {
		swap_entry_free(p, swp_offset(entry));
		spin_unlock(&swap_lock);
	}
}

/*
 * How many references to page are currently swapped out?
 */
static inline int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		/* Subtract the 1 for the swap cache itself */
		count = p->swap_map[swp_offset(entry)] - 1;
		spin_unlock(&swap_lock);
	}
	return count;
}

/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 */
int can_share_swap_page(struct page *page)
{
	int count;

	BUG_ON(!PageLocked(page));
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page))
		count += page_swapcount(page);
	return count == 1;
}

/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_count(page) != 2) /* 2: us + cache */
		return 0;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[swp_offset(entry)] == 1) {
		/* Recheck the page count with the swapcache lock held.. */
		write_lock_irq(&swapper_space.tree_lock);
		if ((page_count(page) == 2) && !PageWriteback(page)) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		write_unlock_irq(&swapper_space.tree_lock);
	}
	spin_unlock(&swap_lock);

	if (retval) {
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}

/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1)
			page = find_trylock_page(&swapper_space, entry.val);
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		page_cache_get(page);
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		if (!PageWriteback(page) && (one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	inc_mm_counter(vma->vm_mm, anon_rss);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	page_add_anon_rmap(page, vma, addr);
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	spinlock_t *ptl;
	int found = 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			unuse_pte(vma, pte++, addr, entry, page);
			found = 1;
			break;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return found;
}
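
/*
 * The walkers below follow the standard four-level page table layout:
 * each level iterates its range with *_addr_end(), skips missing or
 * corrupt entries via *_none_or_clear_bad(), and stops as soon as one
 * pte matching the swap entry has been brought back in.
 */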

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (unuse_pte_range(vma, pmd, addr, next, entry, page))
			return 1;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (unuse_pmd_range(vma, pud, addr, next, entry, page))
			return 1;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma,
				swp_entry_t entry, struct page *page)
{
	pgd_t *pgd;
	unsigned long addr, end, next;

	if (page->mapping) {
		addr = page_address_in_vma(page, vma);
		if (addr == -EFAULT)
			return 0;
		else
			end = addr + PAGE_SIZE;
	} else {
		addr = vma->vm_start;
		end = vma->vm_end;
	}

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (unuse_pud_range(vma, pgd, addr, next, entry, page))
			return 1;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

static int unuse_mm(struct mm_struct *mm,
				swp_entry_t entry, struct page *page)
{
	struct vm_area_struct *vma;

	if (!down_read_trylock(&mm->mmap_sem)) {
		/*
		 * Activate page so shrink_cache is unlikely to unmap its
		 * ptes while lock is dropped, so swapoff can make progress.
		 */
		activate_page(page);
		unlock_page(page);
		down_read(&mm->mmap_sem);
		lock_page(page);
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->anon_vma && unuse_vma(vma, entry, page))
			break;
	}
	up_read(&mm->mmap_sem);
	/*
	 * Currently unuse_mm cannot fail, but leave error handling
	 * at call sites for now, since we change it from time to time.
	 */
	return 0;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Recycle to start on reaching the end, returning 0 when empty.
 */
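/*
 * Example (illustrative): if prev is 600 and every slot above 600 is
 * free or SWAP_MAP_BAD, the loop below wraps around, rechecks slots
 * 1..600, and returns 0 only once no slot anywhere is still in use.
 */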
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
					unsigned int prev)
{
	unsigned int max = si->max;
	unsigned int i = prev;
	int count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (;;) {
		if (++i >= max) {
			if (!prev) {
				i = 0;
				break;
			}
			/*
			 * No entries in use at top of swap_map,
			 * loop back to start and recheck there.
			 */
			max = prev + 1;
			prev = 0;
			i = 1;
		}
		count = si->swap_map[i];
		if (count && count != SWAP_MAP_BAD)
			break;
	}
	return i;
}

/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;
	int reset_overflow = 0;
	int shmem;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.  Though it's only a serious concern when an overflowed
	 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/* 
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it. 
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		shmem = 0;
		swcount = *swap_map;
		if (swcount > 1) {
			if (start_mm == &init_mm)
				shmem = shmem_unuse(entry, page);
			else
				retval = unuse_mm(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (*swap_map > 1 && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (atomic_inc_return(&mm->mm_users) == 1) {
					atomic_dec(&mm->mm_users);
					continue;
				}
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (swcount <= 1)
					;
				else if (mm == &init_mm) {
					set_start_mm = 1;
					shmem = shmem_unuse(entry, page);
				} else
					retval = unuse_mm(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			spin_lock(&swap_lock);
			*swap_map = 1;
			spin_unlock(&swap_lock);
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Note shmem_unuse already deleted a swappage from
		 * the swap cache, unless the move to filepage failed:
		 * in which case it left swappage in cache, lowered its
		 * swap count to pass quickly through the loops above,
		 * and now we must reincrement count to try again later.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}
		if (PageSwapCache(page)) {
			if (shmem)
				swap_duplicate(entry);
			else
				delete_from_swap_cache(page);
		}

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so shrink_list will preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
	}

	mmput(start_mm);
	if (reset_overflow) {
		printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
		swap_overflow = 0;
	}
	return retval;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist.  swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after swap_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int i;

	for (i = 0; i < nr_swapfiles; i++)
		if (swap_info[i].inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

/*
 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
 * corresponds to page offset `offset'.
 */
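/*
 * Example (illustrative): given an extent with start_page 10, nr_pages
 * 20 and start_block 5000, page offset 17 lies inside it and maps to
 * disk block 5000 + (17 - 10) = 5007.
 */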
sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
{
	struct swap_extent *se = sis->curr_swap_extent;
	struct swap_extent *start_se = se;

	for ( ; ; ) {
		struct list_head *lh;

		if (se->start_page <= offset &&
				offset < (se->start_page + se->nr_pages)) {
			return se->start_block + (offset - se->start_page);
		}
		lh = se->list.next;
		if (lh == &sis->extent_list)
			lh = lh->next;
		se = list_entry(lh, struct swap_extent, list);
		sis->curr_swap_extent = se;
		BUG_ON(se == start_se);		/* It *must* be present */
	}
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!list_empty(&sis->extent_list)) {
		struct swap_extent *se;

		se = list_entry(sis->extent_list.next,
				struct swap_extent, list);
		list_del(&se->list);
		kfree(se);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent list.  The extent list is kept sorted in page order.
 *
 * This function rather assumes that it is called in ascending page order.
 */
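/*
 * Example (illustrative): an existing extent {start_page 0, nr_pages
 * 100, start_block 200} merges with a new range starting at page 100,
 * block 300, since 200 + 100 == 300; the extent simply grows to
 * 200 pages.
 */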
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct swap_extent *se;
	struct swap_extent *new_se;
	struct list_head *lh;

	lh = sis->extent_list.prev;	/* The highest page extent */
	if (lh != &sis->extent_list) {
		se = list_entry(lh, struct swap_extent, list);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/*
	 * No merge.  Insert a new extent, preserving ordering.
	 */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	list_add_tail(&new_se->list, &sis->extent_list);
	return 1;
}

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks.  An ordered list of swap extents
 * is built at swapon time and is then used at swap_writepage/swap_readpage
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
 * which will scribble on the fs.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
 * extents in the list.  To avoid much list walking, we cache the previous
 * search location in `curr_swap_extent', and start new searches from there.
 * This is extremely effective.  The average number of iterations in
 * map_swap_page() has been measured at about 0.3 per page.  - akpm.
 */
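/*
 * For an S_ISREG swapfile, each PAGE_SIZE run is located with bmap():
 * the first block of a candidate page must be page-aligned on disk and
 * every following block physically contiguous, otherwise the page is
 * skipped and probing resumes at the next block (see "reprobe" below).
 */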
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct inode *inode;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	inode = sis->swap_file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		goto done;
	}

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
done:
	sis->curr_swap_extent = list_entry(sis->extent_list.prev,
					struct swap_extent, list);
	goto out;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
out:
	return ret;
}

#if 0	/* We don't need this yet */
#include <linux/backing-dev.h>
int page_queue_congested(struct page *page)
{
	struct backing_dev_info *bdi;

	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */

	if (PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		struct swap_info_struct *sis;

		sis = get_swap_info_struct(swp_type(entry));
		bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info;
	} else
		bdi = page->mapping->backing_dev_info;
	return bdi_write_congested(bdi);
}
#endif
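
/*
 * sys_swapoff() removes a swap area.  The order matters: the area is
 * unlinked from swap_list and loses SWP_WRITEOK first, so no new
 * allocations can arrive while try_to_unuse() walks every mm and shmem
 * object to pull the remaining entries back into memory.
 */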

asmlinkage long sys_swapoff(const char __user * specialfile)
{
	struct swap_info_struct * p = NULL;
	unsigned short *swap_map;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	char * pathname;
	int i, type, prev;
	int err;
	
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	pathname = getname(specialfile);
	err = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;

	victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
	putname(pathname);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	prev = -1;
	spin_lock(&swap_lock);
	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
		p = swap_info + type;
		if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
			if (p->swap_file->f_mapping == mapping)
				break;
		}
		prev = type;
	}
	if (type < 0) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory(p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (prev < 0) {
		swap_list.head = p->next;
	} else {
		swap_info[prev].next = p->next;
	}
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	nr_swap_pages -= p->pages;
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&swap_lock);

	current->flags |= PF_SWAPOFF;
	err = try_to_unuse(type);
	current->flags &= ~PF_SWAPOFF;

	if (err) {
		/* re-insert swap space back into swap_list */
		spin_lock(&swap_lock);
		for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
			if (p->prio >= swap_info[i].prio)
				break;
		p->next = i;
		if (prev < 0)
			swap_list.head = swap_list.next = p - swap_info;
		else
			swap_info[prev].next = p - swap_info;
		nr_swap_pages += p->pages;
		total_swap_pages += p->pages;
		p->flags |= SWP_WRITEOK;
		spin_unlock(&swap_lock);
		goto out_dput;
	}

	/* wait for any unplug function to finish */
	down_write(&swap_unplug_sem);
	up_write(&swap_unplug_sem);

	destroy_swap_extents(p);
	down(&swapon_sem);
	spin_lock(&swap_lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
	}

	swap_file = p->swap_file;
	p->swap_file = NULL;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	p->flags = 0;
	spin_unlock(&swap_lock);
	up(&swapon_sem);
	vfree(swap_map);
	inode = mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	} else {
		mutex_lock(&inode->i_mutex);
		inode->i_flags &= ~S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	filp_close(swap_file, NULL);
	err = 0;

out_dput:
	filp_close(victim, NULL);
out:
	return err;
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
	struct swap_info_struct *ptr = swap_info;
	int i;
	loff_t l = *pos;

	down(&swapon_sem);

	for (i = 0; i < nr_swapfiles; i++, ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		if (!l--)
			return ptr;
	}

	return NULL;
}

static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
	struct swap_info_struct *ptr = v;
	struct swap_info_struct *endptr = swap_info + nr_swapfiles;

	for (++ptr; ptr < endptr; ptr++) {
		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
			continue;
		++*pos;
		return ptr;
	}

	return NULL;
}

static void swap_stop(struct seq_file *swap, void *v)
{
	up(&swapon_sem);
}

static int swap_show(struct seq_file *swap, void *v)
{
	struct swap_info_struct *ptr = v;
	struct file *file;
	int len;

	if (v == swap_info)
		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");

	file = ptr->swap_file;
	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
		       len < 40 ? 40 - len : 1, " ",
		       S_ISBLK(file->f_dentry->d_inode->i_mode) ?
				"partition" : "file\t",
		       ptr->pages << (PAGE_SHIFT - 10),
		       ptr->inuse_pages << (PAGE_SHIFT - 10),
		       ptr->prio);
	return 0;
}

static struct seq_operations swaps_op = {
	.start =	swap_start,
	.next =		swap_next,
	.stop =		swap_stop,
	.show =		swap_show
};

static int swaps_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &swaps_op);
}

static struct file_operations proc_swaps_operations = {
	.open		= swaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init procswaps_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("swaps", 0, NULL);
	if (entry)
		entry->proc_fops = &proc_swaps_operations;
	return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */

/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call
 */
asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
{
	struct swap_info_struct * p;
	char *name = NULL;
	struct block_device *bdev = NULL;
	struct file *swap_file = NULL;
	struct address_space *mapping;
	unsigned int type;
	int i, prev;
	int error;
	static int least_priority;
	union swap_header *swap_header = NULL;
	int swap_header_version;
	unsigned int nr_good_pages = 0;
	int nr_extents = 0;
	sector_t span;
	unsigned long maxpages = 1;
	int swapfilesize;
	unsigned short *swap_map;
	struct page *page = NULL;
	struct inode *inode = NULL;
	int did_down = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	spin_lock(&swap_lock);
	p = swap_info;
	for (type = 0 ; type < nr_swapfiles ; type++,p++)
		if (!(p->flags & SWP_USED))
			break;
	error = -EPERM;
	/*
	 * Test if adding another swap device is possible. There are
	 * two limiting factors: 1) the number of bits for the swap
	 * type in the swp_entry_t definition and 2) the number of bits for
	 * the swap type in the swap ptes as defined by the different
	 * architectures. To honor both limitations a swap entry
	 * with swap offset 0 and swap type ~0UL is created, encoded
	 * to a swap pte, decoded to a swp_entry_t again and finally
	 * the swap type part is extracted. This will mask all bits
	 * from the initial ~0UL that can't be encoded in either the
	 * swp_entry_t or the architecture definition of a swap pte.
	 */
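	/*
	 * For example, on an architecture whose swap ptes carry five bits
	 * of type, the expression below evaluates to 31, capping us at 32
	 * swap areas even if MAX_SWAPFILES is larger.
	 */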
	if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
		spin_unlock(&swap_lock);
		goto out;
	}
	if (type >= nr_swapfiles)
		nr_swapfiles = type+1;
	INIT_LIST_HEAD(&p->extent_list);
	p->flags = SWP_USED;
	p->swap_file = NULL;
	p->old_block_size = 0;
	p->swap_map = NULL;
	p->lowest_bit = 0;
	p->highest_bit = 0;
	p->cluster_nr = 0;
	p->inuse_pages = 0;
	p->next = -1;
	if (swap_flags & SWAP_FLAG_PREFER) {
		p->prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
	} else {
		p->prio = --least_priority;
	}
	spin_unlock(&swap_lock);
	name = getname(specialfile);
	error = PTR_ERR(name);
	if (IS_ERR(name)) {
		name = NULL;
		goto bad_swap_2;
	}
	swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
	error = PTR_ERR(swap_file);
	if (IS_ERR(swap_file)) {
		swap_file = NULL;
		goto bad_swap_2;
	}

	p->swap_file = swap_file;
	mapping = swap_file->f_mapping;
	inode = mapping->host;

	error = -EBUSY;
	for (i = 0; i < nr_swapfiles; i++) {
		struct swap_info_struct *q = &swap_info[i];

		if (i == type || !q->swap_file)
			continue;
		if (mapping == q->swap_file->f_mapping)
			goto bad_swap;
	}

	error = -EINVAL;
	if (S_ISBLK(inode->i_mode)) {
		bdev = I_BDEV(inode);
		error = bd_claim(bdev, sys_swapon);
		if (error < 0) {
			bdev = NULL;
			error = -EINVAL;
			goto bad_swap;
		}
		p->old_block_size = block_size(bdev);
		error = set_blocksize(bdev, PAGE_SIZE);
		if (error < 0)
			goto bad_swap;
		p->bdev = bdev;
	} else if (S_ISREG(inode->i_mode)) {
		p->bdev = inode->i_sb->s_bdev;
		mutex_lock(&inode->i_mutex);
		did_down = 1;
		if (IS_SWAPFILE(inode)) {
			error = -EBUSY;
			goto bad_swap;
		}
	} else {
		goto bad_swap;
	}

	swapfilesize = i_size_read(inode) >> PAGE_SHIFT;

	/*
	 * Read the swap header.
	 */
	if (!mapping->a_ops->readpage) {
		error = -EINVAL;
		goto bad_swap;
	}
	page = read_cache_page(mapping, 0,
			(filler_t *)mapping->a_ops->readpage, swap_file);
	if (IS_ERR(page)) {
		error = PTR_ERR(page);
		goto bad_swap;
	}
	wait_on_page_locked(page);
	if (!PageUptodate(page))
		goto bad_swap;
	kmap(page);
	swap_header = page_address(page);

	if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
		swap_header_version = 1;
	else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
		swap_header_version = 2;
	else {
		printk(KERN_ERR "Unable to find swap-space signature\n");
		error = -EINVAL;
		goto bad_swap;
	}
	
	switch (swap_header_version) {
	case 1:
		printk(KERN_ERR "version 0 swap is no longer supported. "
			"Use mkswap -v1 %s\n", name);
		error = -EINVAL;
		goto bad_swap;
	case 2:
		/* Check the swap header's sub-version and the size of
                   the swap file and bad block lists */
		if (swap_header->info.version != 1) {
			printk(KERN_WARNING
			       "Unable to handle swap header version %d\n",
			       swap_header->info.version);
			error = -EINVAL;
			goto bad_swap;
		}

		p->lowest_bit  = 1;
		p->cluster_next = 1;

		/*
		 * Find out how many pages are allowed for a single swap
		 * device. There are two limiting factors: 1) the number of
		 * bits for the swap offset in the swp_entry_t type and
		 * 2) the number of bits in a swap pte as defined by
		 * the different architectures. In order to find the
		 * largest possible bit mask a swap entry with swap type 0
		 * and swap offset ~0UL is created, encoded to a swap pte,
		 * decoded to a swp_entry_t again and finally the swap
		 * offset is extracted. This will mask all the bits from
		 * the initial ~0UL mask that can't be encoded in either
		 * the swp_entry_t or the architecture definition of a
		 * swap pte.
		 */
		maxpages = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0,~0UL)))) - 1;
		if (maxpages > swap_header->info.last_page)
			maxpages = swap_header->info.last_page;
		p->highest_bit = maxpages - 1;

		error = -EINVAL;
		if (!maxpages)
			goto bad_swap;
		if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
			goto bad_swap;
		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
			goto bad_swap;

		/* OK, set up the swap map and apply the bad block list */
		if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
			error = -ENOMEM;
			goto bad_swap;
		}

		error = 0;
		memset(p->swap_map, 0, maxpages * sizeof(short));
		for (i = 0; i < swap_header->info.nr_badpages; i++) {
			int page_nr = swap_header->info.badpages[i];
			if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
				error = -EINVAL;
			else
				p->swap_map[page_nr] = SWAP_MAP_BAD;
		}
		nr_good_pages = swap_header->info.last_page -
				swap_header->info.nr_badpages -
				1 /* header page */;
		if (error)
			goto bad_swap;
	}

	if (swapfilesize && maxpages > swapfilesize) {
		printk(KERN_WARNING
		       "Swap area shorter than signature indicates\n");
		error = -EINVAL;
		goto bad_swap;
	}
	if (nr_good_pages) {
		p->swap_map[0] = SWAP_MAP_BAD;
		p->max = maxpages;
		p->pages = nr_good_pages;
		nr_extents = setup_swap_extents(p, &span);
		if (nr_extents < 0) {
			error = nr_extents;
			goto bad_swap;
		}
		nr_good_pages = p->pages;
	}
	if (!nr_good_pages) {
		printk(KERN_WARNING "Empty swap-file\n");
		error = -EINVAL;
		goto bad_swap;
	}

	down(&swapon_sem);
	spin_lock(&swap_lock);
	p->flags = SWP_ACTIVE;
	nr_swap_pages += nr_good_pages;
	total_swap_pages += nr_good_pages;

	printk(KERN_INFO "Adding %uk swap on %s.  "
			"Priority:%d extents:%d across:%lluk\n",
		nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
		if (p->prio >= swap_info[i].prio) {
			break;
		}
		prev = i;
	}
	p->next = i;
	if (prev < 0) {
		swap_list.head = swap_list.next = p - swap_info;
	} else {
		swap_info[prev].next = p - swap_info;
	}
	spin_unlock(&swap_lock);
	up(&swapon_sem);
	error = 0;
	goto out;
bad_swap:
	if (bdev) {
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
	}
	destroy_swap_extents(p);
bad_swap_2:
	spin_lock(&swap_lock);
	swap_map = p->swap_map;
	p->swap_file = NULL;
	p->swap_map = NULL;
	p->flags = 0;
	if (!(swap_flags & SWAP_FLAG_PREFER))
		++least_priority;
	spin_unlock(&swap_lock);
	vfree(swap_map);
	if (swap_file)
		filp_close(swap_file, NULL);
out:
	if (page && !IS_ERR(page)) {
		kunmap(page);
		page_cache_release(page);
	}
	if (name)
		putname(name);
	if (did_down) {
		if (!error)
			inode->i_flags |= S_SWAPFILE;
		mutex_unlock(&inode->i_mutex);
	}
	return error;
}

void si_swapinfo(struct sysinfo *val)
{
	unsigned int i;
	unsigned long nr_to_be_unused = 0;

	spin_lock(&swap_lock);
	for (i = 0; i < nr_swapfiles; i++) {
		if (!(swap_info[i].flags & SWP_USED) ||
		     (swap_info[i].flags & SWP_WRITEOK))
			continue;
		nr_to_be_unused += swap_info[i].inuse_pages;
	}
	val->freeswap = nr_swap_pages + nr_to_be_unused;
	val->totalswap = total_swap_pages + nr_to_be_unused;
	spin_unlock(&swap_lock);
}

/*
 * Verify that a swap entry is valid and increment its swap map count.
 *
 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
 * "permanent", but will be reclaimed by the next swapoff.
 */
int swap_duplicate(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	int result = 0;

	type = swp_type(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = swp_offset(entry);

	spin_lock(&swap_lock);
	if (offset < p->max && p->swap_map[offset]) {
		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
			p->swap_map[offset]++;
			result = 1;
		} else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
			if (swap_overflow++ < 5)
				printk(KERN_WARNING "swap_dup: swap entry overflow\n");
			p->swap_map[offset] = SWAP_MAP_MAX;
			result = 1;
		}
	}
	spin_unlock(&swap_lock);
out:
	return result;

bad_file:
	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
	goto out;
}

struct swap_info_struct *
get_swap_info_struct(unsigned type)
{
	return &swap_info[type];
}

/*
 * swap_lock prevents swap_map being freed. Don't grab an extra
 * reference on the swaphandle, it doesn't matter if it becomes unused.
 */
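/*
 * Example (illustrative): with the default page_cluster of 3 the window
 * is 8 pages, aligned down to an 8-page boundary; for swap offset 19,
 * readahead starts at offset 16 and covers at most 8 slots, stopping
 * early at a free slot, a bad slot, or the end of the area.
 */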
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
	int ret = 0, i = 1 << page_cluster;
	unsigned long toff;
	struct swap_info_struct *swapdev = swp_type(entry) + swap_info;

	if (!page_cluster)	/* no readahead */
		return 0;
	toff = (swp_offset(entry) >> page_cluster) << page_cluster;
	if (!toff)		/* first page is swap header */
		toff++, i--;
	*offset = toff;

	spin_lock(&swap_lock);
	do {
		/* Don't read-ahead past the end of the swap area */
		if (toff >= swapdev->max)
			break;
		/* Don't read in free or bad pages */
		if (!swapdev->swap_map[toff])
			break;
		if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
			break;
		toff++;
		ret++;
	} while (--i);
	spin_unlock(&swap_lock);
	return ret;
}