ReactOS 0.4.15-dev-6049-ge54b32b
replay.c
Go to the documentation of this file.
1
2#include <linux/module.h>
3#include <linux/time.h>
4#include <linux/fs.h>
5#include <linux/jbd.h>
6#include <linux/errno.h>
7#include <linux/slab.h>
8#include <linux/init.h>
9#include <linux/mm.h>
10#include <linux/freezer.h>
11#include <linux/pagemap.h>
12#include <linux/kthread.h>
13#include <linux/poison.h>
14#include <linux/proc_fs.h>
15#include <linux/debugfs.h>
16
17
18/*
19 * Called under j_state_lock. Returns true if a transaction was started.
20 */
21int __log_start_commit(journal_t *journal, tid_t target)
22{
23 /*
24 * Are we already doing a recent enough commit?
25 */
26 if (!tid_geq(journal->j_commit_request, target)) {
27 /*
28 * We want a new commit: OK, mark the request and wakup the
29 * commit thread. We do _not_ do the commit ourselves.
30 */
31
32 journal->j_commit_request = target;
33 jbd_debug(1, "JBD: requesting commit %d/%d\n",
34 journal->j_commit_request,
35 journal->j_commit_sequence);
36 wake_up(&journal->j_wait_commit);
37 return 1;
38 }
39 return 0;
40}
41
42int log_start_commit(journal_t *journal, tid_t tid)
43{
44 int ret;
45
46 jbd_lock(&journal->j_state_lock);
47 ret = __log_start_commit(journal, tid);
48 jbd_unlock(&journal->j_state_lock);
49 return ret;
50}
51
/*
 * Journal abort has very specific semantics, which we describe
 * below.
 *
 * Two internal functions, which provide abort to the jbd layer
 * itself, are here.
 */
59
60/*
61 * Quick version for internal journal use (doesn't lock the journal).
62 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
63 * and don't attempt to make any other journal updates.
64 */
65static void __journal_abort_hard(journal_t *journal)
66{
67 transaction_t *transaction;
68
69 if (journal->j_flags & JFS_ABORT)
70 return;
71
72 jbd_lock(&journal->j_state_lock);
73 journal->j_flags |= JFS_ABORT;
74 transaction = journal->j_running_transaction;
75 if (transaction)
76 __log_start_commit(journal, transaction->t_tid);
77 jbd_unlock(&journal->j_state_lock);
78}
79
80/* Soft abort: record the abort error status in the journal superblock,
81 * but don't do any other IO. */
82static void __journal_abort_soft (journal_t *journal, int err)
83{
84 if (journal->j_flags & JFS_ABORT)
85 return;
86
87 if (!journal->j_errno)
88 journal->j_errno = err;
89
90 __journal_abort_hard(journal);
91
92 if (err)
93 journal_update_superblock(journal, 1);
94}
95
96
143void journal_abort(journal_t *journal, int err)
144{
145 __journal_abort_soft(journal, err);
146}
147
159int journal_errno(journal_t *journal)
160{
161 int err;
162
163 jbd_lock(&journal->j_state_lock);
164 if (journal->j_flags & JFS_ABORT)
165 err = -EROFS;
166 else
167 err = journal->j_errno;
168 jbd_unlock(&journal->j_state_lock);
169 return err;
170}
171
179int journal_clear_err(journal_t *journal)
180{
181 int err = 0;
182
183 jbd_lock(&journal->j_state_lock);
184 if (journal->j_flags & JFS_ABORT)
185 err = -EROFS;
186 else
187 journal->j_errno = 0;
188 jbd_unlock(&journal->j_state_lock);
189 return err;
190}
191
199void journal_ack_err(journal_t *journal)
200{
201 jbd_lock(&journal->j_state_lock);
202 if (journal->j_errno)
203 journal->j_flags |= JFS_ACK_ERR;
204 jbd_unlock(&journal->j_state_lock);
205}
206
/*
 * NOTE(review): the function signature for this block is missing from
 * this extract (presumably journal_blocks_per_page(struct inode *) --
 * verify against the original jbd sources).
 * Returns the number of filesystem blocks that fit in one page-cache page.
 */
{
	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
}
211
212
213/*
214 * Journal_head storage management
215 */
#ifdef CONFIG_JBD_DEBUG
/* Debug-only count of journal_head objects currently allocated. */
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
220
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably journal_init_journal_head_cache(void) -- verify).
 * Creates the slab cache used for journal_head allocation; returns 0
 * on success or -ENOMEM if the cache could not be created.
 */
{
	int retval;

	/* Must not be called twice: the cache must not already exist. */
	J_ASSERT(journal_head_cache == 0);
	journal_head_cache = kmem_cache_create("journal_head",
				sizeof(struct journal_head),
				0,		/* offset */
				SLAB_TEMPORARY,	/* flags */
				NULL);		/* ctor */
	retval = 0;
	if (journal_head_cache == 0) {
		retval = -ENOMEM;
		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
	}
	return retval;
}
238
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably journal_destroy_journal_head_cache(void)); the
 * kmem_cache_destroy call also appears to have been dropped by the
 * extraction -- verify against the original jbd sources.
 */
{
	J_ASSERT(journal_head_cache != NULL);
}
245
246/*
247 * journal_head splicing and dicing
248 */
/*
 * NOTE(review): the function signature is missing (presumably
 * journal_alloc_journal_head(void)), and the allocation statements
 * appear to have been dropped by the extraction: 'ret' is read but
 * never assigned in the visible text, and one printk argument line is
 * truncated. Verify against the original jbd sources.
 */
{
	struct journal_head *ret;
	static unsigned long last_warning;

#ifdef CONFIG_JBD_DEBUG
	atomic_inc(&nr_journal_heads);
#endif
	if (ret == NULL) {
		jbd_debug(1, "out of memory for journal_head\n");
		/* Rate-limit the warning to once every five seconds. */
		if (time_after(jiffies, last_warning + 5*HZ)) {
			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
			last_warning = jiffies;
		}
		/* Spin (yielding the CPU) until an allocation succeeds. */
		while (ret == NULL) {
			yield();
		}
	}
	return ret;
}
272
/*
 * NOTE(review): the function signature is missing (presumably
 * journal_free_journal_head(jh)); the freeing call itself also appears
 * to have been dropped by the extraction -- verify.
 */
{
#ifdef CONFIG_JBD_DEBUG
	atomic_dec(&nr_journal_heads);
	/* Poison the struct so use-after-free is detectable in debug builds. */
	memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
}
281
282/*
283 * A journal_head is attached to a buffer_head whenever JBD has an
284 * interest in the buffer.
285 *
286 * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
287 * is set. This bit is tested in core kernel code where we need to take
288 * JBD-specific actions. Testing the zeroness of ->b_private is not reliable
289 * there.
290 *
291 * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
292 *
293 * When a buffer has its BH_JBD bit set it is immune from being released by
294 * core kernel code, mainly via ->b_count.
295 *
296 * A journal_head may be detached from its buffer_head when the journal_head's
297 * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
298 * Various places in JBD call journal_remove_journal_head() to indicate that the
299 * journal_head can be dropped if needed.
300 *
301 * Various places in the kernel want to attach a journal_head to a buffer_head
302 * _before_ attaching the journal_head to a transaction. To protect the
303 * journal_head in this situation, journal_add_journal_head elevates the
304 * journal_head's b_jcount refcount by one. The caller must call
305 * journal_put_journal_head() to undo this.
306 *
307 * So the typical usage would be:
308 *
309 * (Attach a journal_head if needed. Increments b_jcount)
310 * struct journal_head *jh = journal_add_journal_head(bh);
311 * ...
312 * jh->b_transaction = xxx;
313 * journal_put_journal_head(jh);
314 *
315 * Now, the journal_head's b_jcount is zero, but it is safe from being released
316 * because it has a non-zero b_transaction.
317 */
318
319/*
320 * Give a buffer_head a journal_head.
321 *
322 * Doesn't need the journal lock.
323 * May sleep.
324 */
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably struct journal_head *journal_add_journal_head(struct
 * buffer_head *bh)); the allocation of new_jh and its release on the
 * final path also appear to have been dropped by the extraction --
 * verify against the original jbd sources.
 *
 * Attach a journal_head to bh (allocating one if necessary), take a
 * b_jcount reference on it, and return it via bh->b_private.
 */
{
	struct journal_head *jh;
	struct journal_head *new_jh = NULL;

repeat:
	if (!buffer_jbd(bh)) {
		memset(new_jh, 0, sizeof(*new_jh));
	}

	jbd_lock_bh_journal_head(bh);
	if (buffer_jbd(bh)) {
		/* Someone attached a journal_head while we allocated. */
		jh = bh2jh(bh);
	} else {
		J_ASSERT_BH(bh,
			(atomic_read(&bh->b_count) > 0) ||
			(bh->b_page && bh->b_page->mapping));

		/* No spare journal_head yet: drop the lock and retry. */
		if (!new_jh) {
			jbd_unlock_bh_journal_head(bh);
			goto repeat;
		}

		jh = new_jh;
		new_jh = NULL;		/* We consumed it */
		set_buffer_jbd(bh);
		bh->b_private = jh;
		jh->b_bh = bh;
		/* Pin the buffer while it carries a journal_head. */
		get_bh(bh);
		BUFFER_TRACE(bh, "added journal_head");
	}
	jh->b_jcount++;
	jbd_unlock_bh_journal_head(bh);
	if (new_jh)
	return bh->b_private;
}
363
364/*
365 * Grab a ref against this buffer_head's journal_head. If it ended up not
366 * having a journal_head, return NULL
367 */
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably struct journal_head *journal_grab_journal_head(struct
 * buffer_head *bh) -- verify).
 */
{
	struct journal_head *jh = NULL;

	jbd_lock_bh_journal_head(bh);
	if (buffer_jbd(bh)) {
		jh = bh2jh(bh);
		/* Take the reference while holding the lock. */
		jh->b_jcount++;
	}
	jbd_unlock_bh_journal_head(bh);
	return jh;
}
380
/*
 * NOTE(review): the function signature is missing (presumably static
 * __journal_remove_journal_head(struct buffer_head *bh)); the printk
 * format-argument lines and the journal_head free call also appear to
 * have been dropped by the extraction -- verify.
 *
 * Detach and release bh's journal_head when it has no remaining users:
 * zero b_jcount and no transaction/checkpoint/next-transaction links.
 */
{
	struct journal_head *jh = bh2jh(bh);

	J_ASSERT_JH(jh, jh->b_jcount >= 0);

	get_bh(bh);
	if (jh->b_jcount == 0) {
		if (jh->b_transaction == NULL &&
				jh->b_next_transaction == NULL &&
				jh->b_cp_transaction == NULL) {
			J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
			J_ASSERT_BH(bh, buffer_jbd(bh));
			J_ASSERT_BH(bh, jh2bh(jh) == bh);
			BUFFER_TRACE(bh, "remove journal_head");
			/* Free any leftover frozen/committed copies. */
			if (jh->b_frozen_data) {
				printk(KERN_WARNING "%s: freeing "
						"b_frozen_data\n",
				jbd_free(jh->b_frozen_data, bh->b_size);
			}
			if (jh->b_committed_data) {
				printk(KERN_WARNING "%s: freeing "
						"b_committed_data\n",
				jbd_free(jh->b_committed_data, bh->b_size);
			}
			bh->b_private = NULL;
			jh->b_bh = NULL;	/* debug, really */
			clear_buffer_jbd(bh);
			/* Drop the reference taken when the jh was attached. */
			__brelse(bh);
		} else {
			BUFFER_TRACE(bh, "journal_head was locked");
		}
	}
}
418
419/*
420 * journal_remove_journal_head(): if the buffer isn't attached to a transaction
421 * and has a zero b_jcount then remove and release its journal_head. If we did
422 * see that the buffer is not used by any transaction we also "logically"
423 * decrement ->b_count.
424 *
425 * We in fact take an additional increment on ->b_count as a convenience,
426 * because the caller usually wants to do additional things with the bh
427 * after calling here.
428 * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
429 * time. Once the caller has run __brelse(), the buffer is eligible for
430 * reaping by try_to_free_buffers().
431 */
/*
 * NOTE(review): the function signature is missing (presumably
 * void journal_remove_journal_head(struct buffer_head *bh)); the call
 * to __journal_remove_journal_head between the lock/unlock pair also
 * appears to have been dropped by the extraction -- verify.
 */
{
	jbd_lock_bh_journal_head(bh);
	jbd_unlock_bh_journal_head(bh);
}
438
439/*
440 * Drop a reference on the passed journal_head. If it fell to zero then try to
441 * release the journal_head from the buffer_head.
442 */
/*
 * NOTE(review): the function signature is missing (presumably
 * void journal_put_journal_head(struct journal_head *jh)); the
 * __journal_remove_journal_head call inside the if-branch also appears
 * to have been dropped by the extraction -- verify.
 */
{
	struct buffer_head *bh = jh2bh(jh);

	jbd_lock_bh_journal_head(bh);
	J_ASSERT_JH(jh, jh->b_jcount > 0);
	--jh->b_jcount;
	/* Last reference and not owned by a transaction: try to release. */
	if (!jh->b_jcount && !jh->b_transaction) {
		__brelse(bh);
	}
	jbd_unlock_bh_journal_head(bh);
}
456
457/*
458 * Log buffer allocation routines:
459 */
460
461int journal_next_log_block(journal_t *journal, unsigned long *retp)
462{
463 unsigned long blocknr;
464
465 jbd_lock(&journal->j_state_lock);
466 J_ASSERT(journal->j_free > 1);
467
468 blocknr = journal->j_head;
469 journal->j_head++;
470 journal->j_free--;
471 if (journal->j_head == journal->j_last)
472 journal->j_head = journal->j_first;
473 jbd_unlock(&journal->j_state_lock);
474 return journal_bmap(journal, blocknr, retp);
475}
476
477/*
478 * Conversion of logical to physical block numbers for the journal
479 *
480 * On external journals the journal blocks are identity-mapped, so
481 * this is a no-op. If needed, we can use j_blk_offset - everything is
482 * ready.
483 */
/*
 * NOTE(review): the printk format-argument line (the "%s" argument)
 * appears to have been dropped by the extraction in the error path --
 * verify against the original jbd sources.
 */
int journal_bmap(journal_t *journal, unsigned long blocknr,
		 unsigned long *retp)
{
	int err = 0;
	unsigned long ret;

	if (journal->j_inode) {
		/* Inode-backed journal: translate via the block map. */
		ret = (unsigned long)bmap(journal->j_inode, (sector_t)blocknr);
		if (ret)
			*retp = ret;
		else {
			/* A hole in the journal file is fatal: abort. */
			printk(KERN_ALERT "%s: journal block not found "
					"at offset %lu ...\n",
					blocknr);
			err = -EIO;
			__journal_abort_soft(journal, err);
		}
	} else {
		/* External journal: identity mapping. */
		*retp = blocknr; /* +journal->j_blk_offset */
	}
	return err;
}
507
508/*
509 * We play buffer_head aliasing tricks to write data/metadata blocks to
510 * the journal without copying their contents, but for journal
511 * descriptor blocks we do need to generate bona fide buffers.
512 *
513 * After the caller of journal_get_descriptor_buffer() has finished modifying
514 * the buffer's contents they really should run flush_dcache_page(bh->b_page).
515 * But we don't bother doing that, so there will be coherency problems with
516 * mmaps of blockdevs which hold live JBD-controlled filesystems.
517 */
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably struct journal_head *
 * journal_get_descriptor_buffer(journal_t *journal) -- verify).
 * Also note __getblk's result is used without a NULL check here --
 * confirm whether that can fail in this environment.
 *
 * Allocates the next log block and returns it as a zero-filled,
 * uptodate buffer with a journal_head attached.
 */
{
	struct buffer_head *bh;
	unsigned long blocknr;
	int err;

	err = journal_next_log_block(journal, &blocknr);

	if (err)
		return NULL;

	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
	/* Zero-fill and mark uptodate so stale disk contents never leak. */
	lock_buffer(bh);
	memset(bh->b_data, 0, journal->j_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	BUFFER_TRACE(bh, "return this buffer");
	return journal_add_journal_head(bh);
}
537
538/*
539 * Management for journal control blocks: functions to create and
540 * destroy journal_t structures, and to initialise and read existing
541 * journal blocks from disk. */
542
/* First: create and setup a journal_t object in memory. We initialise
 * very few fields yet: that has to wait until we have created the
 * journal structures from scratch, or loaded them from disk. */
546
547static journal_t * journal_init_common (void)
548{
549 journal_t *journal;
550 int err;
551
552 journal = kzalloc(sizeof(*journal), GFP_KERNEL);
553 if (!journal)
554 goto fail;
555
556 init_waitqueue_head(&journal->j_wait_transaction_locked);
557 init_waitqueue_head(&journal->j_wait_logspace);
558 init_waitqueue_head(&journal->j_wait_done_commit);
559 init_waitqueue_head(&journal->j_wait_checkpoint);
560 init_waitqueue_head(&journal->j_wait_commit);
561 init_waitqueue_head(&journal->j_wait_updates);
562 mutex_init(&journal->j_barrier);
563 mutex_init(&journal->j_checkpoint_mutex);
564 jbd_lock_init(&journal->j_revoke_lock);
565 jbd_lock_init(&journal->j_list_lock);
566 jbd_lock_init(&journal->j_state_lock);
567
568 journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
569
570 /* The journal is marked for error until we succeed with recovery! */
571 journal->j_flags = JFS_ABORT;
572
573 /* Set up a default-sized revoke table for the new mount. */
574 err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
575 if (err) {
576 kfree(journal);
577 goto fail;
578 }
579 return journal;
580fail:
581 return NULL;
582}
583
/*
 * Create a journal structure backed by an inode's blocks.
 * NOTE(review): the printk format-argument lines (the "%s" arguments)
 * appear to have been dropped by the extraction in both error paths --
 * verify against the original jbd sources.
 */
journal_t * journal_init_inode (struct inode *inode)
{
	struct buffer_head *bh;
	journal_t *journal = journal_init_common();
	int err;
	int n;
	unsigned long blocknr;

	if (!journal)
		return NULL;

	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
	journal->j_inode = inode;
	jbd_debug(1,
		  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
		  journal, inode->i_sb->s_id, inode->i_ino,
		  (s64) inode->i_size,
		  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);

	/* Journal capacity is the whole inode, in filesystem blocks. */
	journal->j_maxlen = (unsigned int)(inode->i_size >> inode->i_sb->s_blocksize_bits);
	journal->j_blocksize = inode->i_sb->s_blocksize;

	/* journal descriptor can store up to n blocks -bzzz */
	n = journal->j_blocksize / sizeof(journal_block_tag_t);
	journal->j_wbufsize = n;
	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
	if (!journal->j_wbuf) {
		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",

		J_ASSERT(journal->j_revoke != NULL);
		if (journal->j_revoke)
			journal_destroy_revoke(journal);

		kfree(journal);
		return NULL;
	}

	err = journal_bmap(journal, 0, &blocknr);
	/* If that failed, give up */
	if (err) {
		printk(KERN_ERR "%s: Cannnot locate journal superblock\n",

		J_ASSERT(journal->j_revoke != NULL);
		if (journal->j_revoke)
			journal_destroy_revoke(journal);
		J_ASSERT(journal->j_wbuf != NULL);
		kfree(journal->j_wbuf);
		kfree(journal);
		return NULL;
	}

	/* Cache the superblock buffer for later load/update. */
	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
	J_ASSERT(bh != NULL);
	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;

	return journal;
}
652
659void journal_wipe_recovery(journal_t *journal)
660{
661 /* We can now mark the journal as empty. */
662
663 journal->j_tail = 0;
664 if (journal->j_sb_buffer) {
665 journal_update_superblock(journal, 0);
666 brelse(journal->j_sb_buffer);
667 journal->j_sb_buffer = NULL;
668 }
669}
670
/*
 * Release a journal_t: drop the superblock buffer, put the backing
 * inode, destroy the revoke tables and free the write buffers and the
 * structure itself. The full shutdown sequence (thread kill, final
 * commit, checkpoint flush) is compiled out in this port.
 */
void journal_destroy(journal_t *journal)
{
#if 0
	/* Wait for the commit thread to wake up and die. */
	journal_kill_thread(journal);

	/* Force a final log commit */
	if (journal->j_running_transaction)
		journal_commit_transaction(journal);

	/* Force any old transactions to disk */

	/* Totally anal locking here... */
	jbd_lock(&journal->j_list_lock);
	while (journal->j_checkpoint_transactions != NULL) {
		jbd_unlock(&journal->j_list_lock);
		log_do_checkpoint(journal);
		jbd_lock(&journal->j_list_lock);
	}

	J_ASSERT(journal->j_running_transaction == NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);
	J_ASSERT(journal->j_checkpoint_transactions == NULL);
	jbd_unlock(&journal->j_list_lock);

	/* We can now mark the journal as empty. */
	journal->j_tail = 0;
	journal->j_tail_sequence = ++journal->j_transaction_sequence;
	if (journal->j_sb_buffer) {
		journal_update_superblock(journal, 1);
		brelse(journal->j_sb_buffer);
	}
#endif

	if (journal->j_sb_buffer) {
		brelse(journal->j_sb_buffer);
	}
	if (journal->j_inode)
		iput(journal->j_inode);
	if (journal->j_revoke)
		journal_destroy_revoke(journal);
	kfree(journal->j_wbuf);
	kfree(journal);
}
722
723
724
/*
 * Report whether the journal's superblock already records use of the
 * given feature bits. V1 superblocks carry no feature flags.
 * NOTE(review): the local declaration of 'sb' (journal_superblock_t
 * *sb;) appears to have been dropped by the extraction -- verify.
 */
int journal_check_used_features (journal_t *journal, unsigned long compat,
				 unsigned long ro, unsigned long incompat)
{

	/* Asking about no features at all is trivially satisfied. */
	if (!compat && !ro && !incompat)
		return 1;
	if (journal->j_format_version == 1)
		return 0;

	sb = journal->j_superblock;

	if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
		return 1;

	return 0;
}
755
/*
 * Report whether this implementation could support the given feature
 * set (only possible on v2 superblocks).
 * NOTE(review): the local declaration of 'sb' appears to have been
 * dropped by the extraction -- verify.
 */
int journal_check_available_features (journal_t *journal, unsigned long compat,
				      unsigned long ro, unsigned long incompat)
{

	if (!compat && !ro && !incompat)
		return 1;

	sb = journal->j_superblock;

	/* We can support any known requested features iff the
	 * superblock is in version 2. Otherwise we fail to support any
	 * extended sb features. */

	if (journal->j_format_version != 2)
		return 0;

	if ((compat & JFS_KNOWN_COMPAT_FEATURES) == compat &&
	    (ro & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
	    (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
		return 1;

	return 0;
}
791
/*
 * Mark the given feature bits as in-use in the superblock, after
 * verifying they are available. Returns 1 on success (including when
 * already set), 0 when unsupported.
 * NOTE(review): the local declaration of 'sb' appears to have been
 * dropped by the extraction -- verify.
 */
int journal_set_features (journal_t *journal, unsigned long compat,
			  unsigned long ro, unsigned long incompat)
{

	if (journal_check_used_features(journal, compat, ro, incompat))
		return 1;

	if (!journal_check_available_features(journal, compat, ro, incompat))
		return 0;

	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
		  compat, ro, incompat);

	sb = journal->j_superblock;

	sb->s_feature_compat |= cpu_to_be32(compat);
	sb->s_feature_ro_compat |= cpu_to_be32(ro);
	sb->s_feature_incompat |= cpu_to_be32(incompat);

	return 1;
}
826
/*
 * Upgrade a version-1 journal superblock to version 2 in place.
 * NOTE(review): the second parameter line (presumably
 * journal_superblock_t *sb), the KERN_WARNING printk line, and the
 * final mark_buffer_dirty/sync calls appear to have been dropped by
 * the extraction -- verify against the original jbd sources.
 */
static int journal_convert_superblock_v1(journal_t *journal,
{
	int offset, blocksize;
	struct buffer_head *bh;

		"JBD: Converting superblock from version 1 to 2.\n");

	/* Pre-initialise new fields to zero */
	offset = (INT)(((INT_PTR) &(sb->s_feature_compat)) - ((INT_PTR) sb));
	blocksize = be32_to_cpu(sb->s_blocksize);
	memset(&sb->s_feature_compat, 0, blocksize-offset);

	sb->s_nr_users = cpu_to_be32(1);
	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
	journal->j_format_version = 2;

	bh = journal->j_sb_buffer;
	BUFFER_TRACE(bh, "marking dirty");
	return 0;
}
851
852
853/*
854 * If the journal init or create aborts, we need to mark the journal
855 * superblock as being NULL to prevent the journal destroy from writing
856 * back a bogus superblock.
857 */
858static void journal_fail_superblock (journal_t *journal)
859{
860 struct buffer_head *bh = journal->j_sb_buffer;
861 brelse(bh);
862 journal->j_sb_buffer = NULL;
863}
864
865
866/*
867 * Read the superblock for a given journal, performing initial
868 * validation of the format.
869 */
870
/*
 * Read and validate the journal superblock, setting
 * journal->j_format_version and clamping j_maxlen.
 * NOTE(review): several lines appear dropped by the extraction: the
 * local declaration of 'sb', the printk prefix of the IO-error
 * message, the two 'case JFS_SUPERBLOCK_V1/V2:' labels, and the
 * error-path cleanup after 'out:' -- verify against the original
 * jbd sources.
 */
static int journal_get_superblock(journal_t *journal)
{
	struct buffer_head *bh;
	int err = -EIO;

	bh = journal->j_sb_buffer;

	J_ASSERT(bh != NULL);
	/* Read the superblock from disk if it is not already cached. */
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			"JBD: IO error reading journal superblock\n");
			goto out;
		}
	}

	sb = journal->j_superblock;

	err = -EINVAL;

	/* Magic number and blocksize must both match. */
	if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
		printk(KERN_WARNING "JBD: no valid journal superblock found\n");
		goto out;
	}

	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
		journal->j_format_version = 1;
		break;
		journal->j_format_version = 2;
		break;
	default:
		printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
		goto out;
	}

	/* Clamp j_maxlen to the on-disk size; a shorter file is fatal. */
	if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
		journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
	else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
		printk (KERN_WARNING "JBD: journal file too short\n");
		goto out;
	}

	return 0;

out:
	return err;
}
925
926/*
927 * Load the on-disk journal superblock and read the key fields into the
928 * journal_t.
929 */
930
/*
 * Read the superblock and copy its dynamic fields (sequence, start,
 * bounds, errno) into the in-memory journal_t.
 * NOTE(review): the local declaration of 'sb' appears to have been
 * dropped by the extraction -- verify.
 */
static int load_superblock(journal_t *journal)
{
	int err;

	err = journal_get_superblock(journal);
	if (err)
		return err;

	sb = journal->j_superblock;

	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
	journal->j_tail = be32_to_cpu(sb->s_start);
	journal->j_first = be32_to_cpu(sb->s_first);
	journal->j_last = be32_to_cpu(sb->s_maxlen);
	journal->j_errno = be32_to_cpu(sb->s_errno);

	return 0;
}
950
/*
 * Skip any pending journal recovery and, when 'write' is set, clear
 * the recovery record in the on-disk superblock too.
 * NOTE(review): the local declaration of 'sb' appears to have been
 * dropped by the extraction -- verify.
 */
int journal_wipe(journal_t *journal, int write)
{
	int err = 0;

	/* Must be called before the journal is loaded. */
	J_ASSERT (!(journal->j_flags & JFS_LOADED));

	err = load_superblock(journal);
	if (err)
		return err;

	sb = journal->j_superblock;

	/* j_tail == 0 means no recovery information is pending. */
	if (!journal->j_tail)
		goto no_recovery;

	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
		write ? "Clearing" : "Ignoring");

	err = journal_skip_recovery(journal);
	if (write)
		journal_update_superblock(journal, 1);

no_recovery:
	return err;
}
990
991
/*
 * Ensure the journal superblock is at version 2, converting a v1
 * superblock when necessary; -EINVAL for unknown block types.
 * NOTE(review): the local declaration of 'sb' appears to have been
 * dropped by the extraction -- verify.
 */
int journal_update_format (journal_t *journal)
{
	int err;

	err = journal_get_superblock(journal);
	if (err)
		return err;

	sb = journal->j_superblock;

	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
	case JFS_SUPERBLOCK_V2:
		return 0;
	case JFS_SUPERBLOCK_V1:
		return journal_convert_superblock_v1(journal, sb);
	default:
		break;
	}
	return -EINVAL;
}
1020
1021
/*
 * Write the dynamic superblock fields (sequence, start, errno) back to
 * disk, synchronously when 'wait' is set.
 * NOTE(review): the mark_buffer_dirty(bh) line and the synchronous
 * write call in the 'if (wait)' branch appear to have been dropped by
 * the extraction -- verify against the original jbd sources.
 */
void journal_update_superblock(journal_t *journal, int wait)
{
	journal_superblock_t *sb = journal->j_superblock;
	struct buffer_head *bh = journal->j_sb_buffer;

	/*
	 * As a special case, if the on-disk copy is already marked as needing
	 * no recovery (s_start == 0) and there are no outstanding transactions
	 * in the filesystem, then we can safely defer the superblock update
	 * until the next commit by setting JFS_FLUSHED. This avoids
	 * attempting a write to a potential-readonly device.
	 */
	if (sb->s_start == 0 && journal->j_tail_sequence ==
				journal->j_transaction_sequence) {
		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
			"(start %ld, seq %d, errno %d)\n",
			journal->j_tail, journal->j_tail_sequence,
			journal->j_errno);
		goto out;
	}

	jbd_lock(&journal->j_state_lock);
	jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
		  journal->j_tail, journal->j_tail_sequence, journal->j_errno);

	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
	sb->s_start = cpu_to_be32(journal->j_tail);
	sb->s_errno = cpu_to_be32(journal->j_errno);
	jbd_unlock(&journal->j_state_lock);

	BUFFER_TRACE(bh, "marking dirty");
	if (wait)
	else
		ll_rw_block(SWRITE, 1, &bh);

out:
	/* If we have just flushed the log (by marking s_start==0), then
	 * any future commit will have to be careful to update the
	 * superblock again to re-record the true start of the log. */

	jbd_lock(&journal->j_state_lock);
	if (sb->s_start)
		journal->j_flags &= ~JFS_FLUSHED;
	else
		journal->j_flags |= JFS_FLUSHED;
	jbd_unlock(&journal->j_state_lock);
}
1079
1080/*
1081 * Given a journal_t structure, initialise the various fields for
1082 * startup of a new journaling session. We use this both when creating
1083 * a journal, and after recovering an old journal to reset it for
1084 * subsequent use.
1085 */
1086
1087static int journal_reset(journal_t *journal)
1088{
1089 journal_superblock_t *sb = journal->j_superblock;
1090 unsigned long first, last;
1091
1092 first = be32_to_cpu(sb->s_first);
1093 last = be32_to_cpu(sb->s_maxlen);
1094
1095 journal->j_first = first;
1096 journal->j_last = last;
1097
1098 journal->j_head = first;
1099 journal->j_tail = first;
1100 journal->j_free = last - first;
1101
1102 journal->j_tail_sequence = journal->j_transaction_sequence;
1103 journal->j_commit_sequence = journal->j_transaction_sequence - 1;
1104 journal->j_commit_request = journal->j_commit_sequence;
1105
1106 journal->j_max_transaction_buffers = journal->j_maxlen / 4;
1107
1108 /* Add the dynamic fields and write it to disk. */
1109 journal_update_superblock(journal, 1);
1110 return 0;
1111}
1112
/*
 * Load the journal: read the superblock, check feature flags, run
 * recovery, then reset the dynamic fields and clear JFS_ABORT.
 * NOTE(review): the local declaration of 'sb' and the
 * JFS_KNOWN_ROCOMPAT/INCOMPAT feature-mask lines appear to have been
 * dropped by the extraction -- verify against the original sources.
 */
int journal_load(journal_t *journal)
{
	int err;

	err = load_superblock(journal);
	if (err)
		return err;

	sb = journal->j_superblock;
	/* If this is a V2 superblock, then we have to check the
	 * features flags on it. */

	if (journal->j_format_version >= 2) {
		if ((sb->s_feature_ro_compat &
		    (sb->s_feature_incompat &
			"JBD: Unrecognised features on journal\n");
			return -EINVAL;
		}
	}

	/* Let the recovery code check whether it needs to recover any
	 * data from the journal. */
	if (journal_recover(journal))
		goto recovery_error;

	/* OK, we've finished with the dynamic journal bits:
	 * reinitialise the dynamic contents of the superblock in memory
	 * and reset them on disk. */
	if (journal_reset(journal))
		goto recovery_error;

	journal->j_flags &= ~JFS_ABORT;
	journal->j_flags |= JFS_LOADED;
	return 0;

recovery_error:
	printk (KERN_WARNING "JBD: recovery failed\n");
	return -EIO;
}
1164
1165
1166//
1167// transactions routines
1168//
1169
1170
1171/*
1172 *
1173 * List management code snippets: various functions for manipulating the
1174 * transaction buffer lists.
1175 *
1176 */
1177
1178/*
1179 * Append a buffer to a transaction list, given the transaction's list head
1180 * pointer.
1181 *
1182 * j_list_lock is held.
1183 *
1184 * jbd_lock_bh_state(jh2bh(jh)) is held.
1185 */
1186
/*
 * Append jh to a circular transaction buffer list.
 * NOTE(review): the line carrying the function name and parameters is
 * missing from this extract (presumably __blist_add_buffer(struct
 * journal_head **list, struct journal_head *jh) -- verify).
 */
static inline void
{
	if (!*list) {
		/* Empty list: jh becomes the sole element, linked to itself. */
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}
1201
1202/*
1203 * Remove a buffer from a transaction list, given the transaction's list
1204 * head pointer.
1205 *
1206 * Called with j_list_lock held, and the journal may not be locked.
1207 *
1208 * jbd_lock_bh_state(jh2bh(jh)) is held.
1209 */
1210
/*
 * Unlink jh from a circular transaction buffer list.
 * NOTE(review): the line carrying the function name and parameters is
 * missing from this extract (presumably __blist_del_buffer(struct
 * journal_head **list, struct journal_head *jh) -- verify).
 */
static inline void
{
	/* If jh is the list head, advance the head (or empty the list). */
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
1222
1223/*
1224 * Remove a buffer from the appropriate transaction list.
1225 *
1226 * Note that this function can *change* the value of
1227 * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
1228 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list. If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
1230 * Generally the caller needs to re-read the pointer from the transaction_t.
1231 *
1232 * Called under j_list_lock. The journal may not be locked.
1233 */
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably static void __journal_temp_unlink_buffer(struct
 * journal_head *jh)); the __blist_del_buffer(list, jh) call before
 * resetting b_jlist also appears to have been dropped -- verify.
 *
 * Remove jh from whichever transaction list it is currently on and
 * reset it to BJ_None.
 */
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_jbd_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	/* Pick the transaction list the buffer currently lives on. */
	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	jh->b_jlist = BJ_None;
	/* A jbd-dirty buffer leaving the journal must look dirty to the VM. */
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}
1285
/*
 * NOTE(review): the function signature is missing from this extract
 * (presumably void __journal_unfile_buffer(struct journal_head *jh));
 * the call that unlinks jh from its list also appears to have been
 * dropped by the extraction -- verify.
 */
{
	jh->b_transaction = NULL;
}
1291
/*
 * Locked wrapper: takes bh-state and j_list locks around the unfile.
 * NOTE(review): the inner __journal_unfile_buffer(jh) call appears to
 * have been dropped by the extraction -- verify.
 */
void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	jbd_lock_bh_state(jh2bh(jh));
	jbd_lock(&journal->j_list_lock);
	jbd_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}
1300
1301/*
1302 * This buffer is no longer needed. If it is on an older transaction's
1303 * checkpoint list we need to record it on this transaction's forget list
1304 * to pin this buffer (and hence its checkpointing transaction) down until
1305 * this transaction commits. If the buffer isn't on a checkpoint list, we
1306 * release it.
1307 * Returns non-zero if JBD no longer has an interest in the buffer.
1308 *
1309 * Called under j_list_lock.
1310 *
1311 * Called under jbd_lock_bh_state(bh).
1312 */
/*
 * NOTE(review): the unfile call at the top of the function and the
 * journal-head removal call in the else-branch appear to have been
 * dropped by the extraction -- verify against the original sources.
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		/* Pin via the forget list until the checkpoint commits. */
		__journal_file_buffer(jh, transaction, BJ_Forget);
		clear_buffer_jbddirty(bh);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__brelse(bh);
	}
	return may_free;
}
1332
1333
1334/*
1335 * File a buffer on the given transaction list.
1336 */
/*
 * File a buffer on the given transaction list.
 * NOTE(review): the first line of the signature is missing from this
 * extract (presumably void __journal_file_buffer(struct journal_head
 * *jh,); the temp-unlink call under 'if (jh->b_transaction)' and the
 * __blist_add_buffer(list, jh) insertion also appear to have been
 * dropped by the extraction -- verify.
 */
	transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_jbd_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	/* Already on the requested list: nothing to do. */
	if (jh->b_transaction && jh->b_jlist == (unsigned) jlist)
		return;

	/* The following list of buffer states needs to be consistent
	 * with __jbd_unexpected_dirty_buffer()'s handling of dirty
	 * state. */

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	jh->b_jlist = jlist;

	/* Restore the dirty bit in its jbd-private form. */
	if (was_dirty)
		set_buffer_jbddirty(bh);
}
1407
1409 transaction_t *transaction, int jlist)
1410{
1411 jbd_lock_bh_state(jh2bh(jh));
1412 jbd_lock(&transaction->t_journal->j_list_lock);
1413 __journal_file_buffer(jh, transaction, jlist);
1414 jbd_unlock(&transaction->t_journal->j_list_lock);
1415 jbd_unlock_bh_state(jh2bh(jh));
1416}
1417
1418
1419/*
1420 * journal_release_buffer: undo a get_write_access without any buffer
1421 * updates, if the update decided in the end that it didn't need access.
1422 *
1423 */
1424void
1426{
1427 BUFFER_TRACE(bh, "entry");
1428}
1429
1448{
1449 transaction_t *transaction = handle->h_transaction;
1450 journal_t *journal = transaction->t_journal;
1451 struct journal_head *jh;
1452 int drop_reserve = 0;
1453 int err = 0;
1454
1455 BUFFER_TRACE(bh, "entry");
1456
1457 jbd_lock_bh_state(bh);
1458 jbd_lock(&journal->j_list_lock);
1459
1460 if (!buffer_jbd(bh))
1461 goto not_jbd;
1462 jh = bh2jh(bh);
1463
1464 /* Critical error: attempting to delete a bitmap buffer, maybe?
1465 * Don't do any jbd operations, and return an error. */
1466 if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1467 "inconsistent data on disk")) {
1468 err = -EIO;
1469 goto not_jbd;
1470 }
1471
1472 /*
1473 * The buffer's going from the transaction, we must drop
1474 * all references -bzzz
1475 */
1476 jh->b_modified = 0;
1477
1478 if (jh->b_transaction == handle->h_transaction) {
1479 J_ASSERT_JH(jh, !jh->b_frozen_data);
1480
1481 /* If we are forgetting a buffer which is already part
1482 * of this transaction, then we can just drop it from
1483 * the transaction immediately. */
1484 clear_buffer_dirty(bh);
1485 clear_buffer_jbddirty(bh);
1486
1487 JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1488
1489 drop_reserve = 1;
1490
1491 /*
1492 * We are no longer going to journal this buffer.
1493 * However, the commit of this transaction is still
1494 * important to the buffer: the delete that we are now
1495 * processing might obsolete an old log entry, so by
1496 * committing, we can satisfy the buffer's checkpoint.
1497 *
1498 * So, if we have a checkpoint on the buffer, we should
1499 * now refile the buffer on our BJ_Forget list so that
1500 * we know to remove the checkpoint after we commit.
1501 */
1502
1503 if (jh->b_cp_transaction) {
1505 __journal_file_buffer(jh, transaction, BJ_Forget);
1506 } else {
1509 __brelse(bh);
1510 if (!buffer_jbd(bh)) {
1511 jbd_unlock(&journal->j_list_lock);
1512 jbd_unlock_bh_state(bh);
1513 __bforget(bh);
1514 goto drop;
1515 }
1516 }
1517 } else if (jh->b_transaction) {
1518 J_ASSERT_JH(jh, (jh->b_transaction ==
1519 journal->j_committing_transaction));
1520 /* However, if the buffer is still owned by a prior
1521 * (committing) transaction, we can't drop it yet... */
1522 JBUFFER_TRACE(jh, "belongs to older transaction");
1523 /* ... but we CAN drop it from the new transaction if we
1524 * have also modified it since the original commit. */
1525
1526 if (jh->b_next_transaction) {
1527 J_ASSERT(jh->b_next_transaction == transaction);
1529 drop_reserve = 1;
1530 }
1531 }
1532
1533not_jbd:
1534 jbd_unlock(&journal->j_list_lock);
1535 jbd_unlock_bh_state(bh);
1536 __brelse(bh);
1537drop:
1538 if (drop_reserve) {
1539 /* no need to reserve log space for this block -bzzz */
1540 handle->h_buffer_credits++;
1541 }
1542 return err;
1543}
1544
1545/*
1546 * debugfs tunables
1547 */
#ifdef CONFIG_JBD_DEBUG

/* Runtime-tunable debug verbosity, exposed via debugfs as "jbd/jbd-debug". */
u8 journal_enable_debug __read_mostly;
EXPORT_SYMBOL(journal_enable_debug);

static struct dentry *jbd_debugfs_dir;
static struct dentry *jbd_debug;

/* Create the debugfs directory and the u8 knob backing
 * journal_enable_debug.  Failure to create the directory is tolerated:
 * the knob is simply skipped. */
static void __init jbd_create_debugfs_entry(void)
{
	jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
	if (jbd_debugfs_dir)
		jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO,
					      jbd_debugfs_dir,
					      &journal_enable_debug);
}

/* Tear down the debugfs entries created above; debugfs_remove() accepts
 * NULL, so partially-created state is handled. */
static void __exit jbd_remove_debugfs_entry(void)
{
	debugfs_remove(jbd_debug);
	debugfs_remove(jbd_debugfs_dir);
}

#else

/* Stubs used when CONFIG_JBD_DEBUG is disabled. */
static inline void jbd_create_debugfs_entry(void)
{
}

static inline void jbd_remove_debugfs_entry(void)
{
}

#endif
1582
1584
1586{
1587 jbd_handle_cache = kmem_cache_create("journal_handle",
1588 sizeof(handle_t),
1589 0, /* offset */
1590 SLAB_TEMPORARY, /* flags */
1591 NULL); /* ctor */
1592 if (jbd_handle_cache == NULL) {
1593 printk(KERN_EMERG "JBD: failed to create handle cache\n");
1594 return -ENOMEM;
1595 }
1596 return 0;
1597}
1598
1600{
1601 if (jbd_handle_cache)
1603}
1604
1605/*
1606 * Module startup and shutdown
1607 */
1608
1610{
1611 int ret;
1612
1614 if (ret == 0)
1616 if (ret == 0)
1618 return ret;
1619}
1620
/* Tear down every cache created by journal_init_caches(); each destroy
 * helper is safe to call even if its cache was never created. */
static void journal_destroy_caches(void)
{
	journal_destroy_revoke_caches();
	journal_destroy_journal_head_cache();
	journal_destroy_handle_cache();
}
1627
1628static int __init journal_init(void)
1629{
1630 int ret;
1631
1632 J_ASSERT(sizeof(struct journal_superblock_s) == 1024);
1633
1635 if (ret != 0)
1638 return ret;
1639}
1640
1641static void __exit journal_exit(void)
1642{
1643#ifdef CONFIG_JBD_DEBUG
1644 int n = atomic_read(&nr_journal_heads);
1645 if (n)
1646 printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
1647#endif
1650}
1651
#define EINVAL
Definition: acclib.h:90
#define ENOMEM
Definition: acclib.h:84
#define EIO
Definition: acclib.h:81
#define __init
Definition: aclinux.h:211
#define write
Definition: acwin.h:97
#define atomic_read(v)
Definition: atomic.h:23
static void atomic_inc(atomic_t volatile *v)
Definition: atomic.h:95
#define ATOMIC_INIT(i)
Definition: atomic.h:14
static void atomic_dec(atomic_t volatile *v)
Definition: atomic.h:107
UCHAR u8
Definition: btrfs.h:12
Definition: list.h:37
#define NULL
Definition: types.h:112
unsigned int(__cdecl typeof(jpeg_read_scanlines))(struct jpeg_decompress_struct *
Definition: typeof.h:31
superblock * sb
Definition: btrfs.c:4261
#define EROFS
Definition: errno.h:36
void iput(struct inode *inode)
Definition: linux.c:992
ULONGLONG bmap(struct inode *i, ULONGLONG b)
Definition: linux.c:942
unsigned __int64 sector_t
Definition: types.h:82
#define __FUNCTION__
Definition: types.h:116
int journal_skip_recovery(journal_t *journal)
Definition: recovery.c:282
int journal_recover(journal_t *journal)
Definition: recovery.c:225
signed long long s64
Definition: linux.h:60
#define S_IRUGO
Definition: ext2fs.h:402
GLdouble n
Definition: glext.h:7729
const GLint * first
Definition: glext.h:5794
GLintptr offset
Definition: glext.h:5920
GLenum target
Definition: glext.h:7315
POINT last
Definition: font.c:46
#define time_after(a, b)
Definition: glue.h:18
tid_t
Definition: ieframe.h:311
#define BJ_Forget
Definition: jbd.h:1120
#define BJ_Types
Definition: jbd.h:1126
#define BJ_LogCtl
Definition: jbd.h:1123
#define JFS_SUPERBLOCK_V1
Definition: jbd.h:171
#define BJ_IO
Definition: jbd.h:1121
struct journal_block_tag_s journal_block_tag_t
#define BJ_Metadata
Definition: jbd.h:1119
#define JFS_SUPERBLOCK_V2
Definition: jbd.h:172
#define JFS_KNOWN_ROCOMPAT_FEATURES
Definition: jbd.h:275
#define BJ_SyncData
Definition: jbd.h:1118
#define JFS_KNOWN_COMPAT_FEATURES
Definition: jbd.h:274
#define BJ_Reserved
Definition: jbd.h:1124
#define BJ_Locked
Definition: jbd.h:1125
#define JFS_MAGIC_NUMBER
Definition: jbd.h:159
#define BJ_Shadow
Definition: jbd.h:1122
static int tid_geq(tid_t x, tid_t y)
Definition: jbd.h:1091
#define BJ_None
Definition: jbd.h:1117
#define JFS_KNOWN_INCOMPAT_FEATURES
Definition: jbd.h:276
struct transaction_s transaction_t
Definition: journal-head.h:14
#define EXPORT_SYMBOL(x)
Definition: module.h:272
void __bforget(struct buffer_head *)
Definition: linux.c:843
kmem_cache_t * kmem_cache_create(const char *name, size_t size, size_t offset, unsigned long flags, kmem_cache_cb_t ctor)
Definition: linux.c:48
#define kmalloc(size, gfp)
Definition: module.h:1125
#define SWRITE
Definition: module.h:1178
static void lock_buffer(struct buffer_head *bh)
Definition: module.h:1028
#define module_init(X)
Definition: module.h:278
#define GFP_KERNEL
Definition: module.h:668
#define KERN_NOTICE
Definition: module.h:227
void kmem_cache_free(kmem_cache_t *kc, void *p)
Definition: linux.c:103
void unlock_buffer(struct buffer_head *bh)
Definition: linux.c:853
#define KERN_WARNING
Definition: module.h:226
#define GFP_NOFS
Definition: module.h:669
#define MODULE_LICENSE(x)
Definition: module.h:270
struct buffer_head * __getblk(struct block_device *bdev, sector_t block, unsigned long size)
Definition: linux.c:799
void * kmem_cache_alloc(kmem_cache_t *kc, int flags)
Definition: linux.c:92
#define SLAB_TEMPORARY
Definition: module.h:1133
int kmem_cache_destroy(kmem_cache_t *kc)
Definition: linux.c:82
static void get_bh(struct buffer_head *bh)
Definition: module.h:944
void * kzalloc(int size, int flags)
Definition: linux.c:34
#define yield()
Definition: module.h:449
#define printk
Definition: module.h:231
static void wait_on_buffer(struct buffer_head *bh)
Definition: module.h:1021
#define __exit
Definition: module.h:267
int wake_up(wait_queue_head_t *queue)
Definition: linux.c:279
int sync_dirty_buffer(struct buffer_head *bh)
Definition: linux.c:898
void ll_rw_block(int, int, struct buffer_head *bh[])
Definition: linux.c:862
#define KERN_ALERT
Definition: module.h:223
#define be32_to_cpu
Definition: module.h:157
#define kfree(p)
Definition: module.h:1126
void init_waitqueue_head(wait_queue_head_t *q)
Definition: linux.c:115
#define PAGE_CACHE_SHIFT
Definition: module.h:706
static void brelse(struct buffer_head *bh)
Definition: module.h:955
#define KERN_EMERG
Definition: module.h:222
void __brelse(struct buffer_head *)
Definition: linux.c:808
#define jiffies
Definition: module.h:1085
#define cpu_to_be32
Definition: module.h:156
#define module_exit(X)
Definition: module.h:279
void mark_buffer_dirty(struct buffer_head *bh)
Definition: linux.c:914
#define KERN_ERR
Definition: module.h:225
static TfClientId tid
#define HZ
Definition: pchw.c:36
#define INT
Definition: polytest.cpp:20
#define long
Definition: qsort.c:33
#define err(...)
#define mutex_init(m, a)
Definition: reentrant.h:127
static FILE * out
Definition: regtests2xml.c:44
static void __journal_remove_journal_head(struct buffer_head *bh)
Definition: replay.c:381
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
Definition: replay.c:1313
int __log_start_commit(journal_t *journal, tid_t target)
Definition: replay.c:21
struct journal_head * journal_grab_journal_head(struct buffer_head *bh)
Definition: replay.c:368
struct journal_head * journal_get_descriptor_buffer(journal_t *journal)
Definition: replay.c:518
struct journal_head * journal_add_journal_head(struct buffer_head *bh)
Definition: replay.c:325
static int journal_init_journal_head_cache(void)
Definition: replay.c:221
int journal_load(journal_t *journal)
Definition: replay.c:1121
static void __journal_abort_soft(journal_t *journal, int err)
Definition: replay.c:82
int journal_set_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat)
Definition: replay.c:804
journal_t * journal_init_inode(struct inode *inode)
Definition: replay.c:592
int journal_forget(handle_t *handle, struct buffer_head *bh)
Definition: replay.c:1447
int journal_bmap(journal_t *journal, unsigned long blocknr, unsigned long *retp)
Definition: replay.c:484
static void __journal_abort_hard(journal_t *journal)
Definition: replay.c:65
static void jbd_create_debugfs_entry(void)
Definition: replay.c:1573
static int load_superblock(journal_t *journal)
Definition: replay.c:931
static void journal_destroy_caches(void)
Definition: replay.c:1621
int journal_clear_err(journal_t *journal)
Definition: replay.c:179
static void __journal_temp_unlink_buffer(struct journal_head *jh)
Definition: replay.c:1234
static void __exit journal_exit(void)
Definition: replay.c:1641
static struct kmem_cache * journal_head_cache
Definition: replay.c:216
void journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist)
Definition: replay.c:1408
void journal_release_buffer(handle_t *handle, struct buffer_head *bh)
Definition: replay.c:1425
void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
Definition: replay.c:1292
static void journal_destroy_handle_cache(void)
Definition: replay.c:1599
static int journal_convert_superblock_v1(journal_t *journal, journal_superblock_t *sb)
Definition: replay.c:827
int journal_update_format(journal_t *journal)
Definition: replay.c:999
static void __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
Definition: replay.c:1188
int journal_next_log_block(journal_t *journal, unsigned long *retp)
Definition: replay.c:461
static void __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
Definition: replay.c:1212
static int journal_reset(journal_t *journal)
Definition: replay.c:1087
static struct journal_head * journal_alloc_journal_head(void)
Definition: replay.c:249
struct kmem_cache * jbd_handle_cache
Definition: replay.c:1583
int log_start_commit(journal_t *journal, tid_t tid)
Definition: replay.c:42
void journal_update_superblock(journal_t *journal, int wait)
Definition: replay.c:1030
static int __init journal_init_handle_cache(void)
Definition: replay.c:1585
static int __init journal_init_caches(void)
Definition: replay.c:1609
void journal_put_journal_head(struct journal_head *jh)
Definition: replay.c:443
int journal_check_available_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat)
Definition: replay.c:767
void journal_wipe_recovery(journal_t *journal)
Definition: replay.c:659
static void journal_destroy_journal_head_cache(void)
Definition: replay.c:239
void __journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist)
Definition: replay.c:1337
void journal_ack_err(journal_t *journal)
Definition: replay.c:199
int journal_wipe(journal_t *journal, int write)
Definition: replay.c:964
static void journal_free_journal_head(struct journal_head *jh)
Definition: replay.c:273
void journal_remove_journal_head(struct buffer_head *bh)
Definition: replay.c:432
static int journal_get_superblock(journal_t *journal)
Definition: replay.c:871
int journal_errno(journal_t *journal)
Definition: replay.c:159
static void jbd_remove_debugfs_entry(void)
Definition: replay.c:1577
static void journal_fail_superblock(journal_t *journal)
Definition: replay.c:858
void journal_destroy(journal_t *journal)
Definition: replay.c:678
static journal_t * journal_init_common(void)
Definition: replay.c:547
static int __init journal_init(void)
Definition: replay.c:1628
void __journal_unfile_buffer(struct journal_head *jh)
Definition: replay.c:1286
int journal_check_used_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat)
Definition: replay.c:736
void journal_abort(journal_t *journal, int err)
Definition: replay.c:143
int journal_blocks_per_page(struct inode *inode)
Definition: replay.c:207
void journal_destroy_revoke_caches(void)
Definition: revoke.c:191
void journal_destroy_revoke(journal_t *journal)
Definition: revoke.c:271
int __init journal_init_revoke_caches(void)
Definition: revoke.c:170
int journal_init_revoke(journal_t *journal, int hash_size)
Definition: revoke.c:201
#define list
Definition: rosglue.h:35
#define memset(x, y, z)
Definition: compat.h:39
size_t b_size
Definition: module.h:734
void * b_private
Definition: module.h:737
atomic_t b_count
Definition: module.h:740
struct page * b_page
Definition: module.h:726
char * b_data
Definition: module.h:735
Definition: fs.h:117
Definition: fs.h:78
__u32 i_ino
Definition: fs.h:79
loff_t i_size
Definition: fs.h:80
struct super_block * i_sb
Definition: fs.h:96
transaction_t * b_next_transaction
Definition: journal-head.h:69
transaction_t * b_cp_transaction
Definition: journal-head.h:82
transaction_t * b_transaction
Definition: journal-head.h:61
struct journal_head * b_tprev
Definition: journal-head.h:75
unsigned b_modified
Definition: journal-head.h:39
char * b_committed_data
Definition: journal-head.h:52
struct buffer_head * b_bh
Definition: journal-head.h:21
struct journal_head * b_tnext
Definition: journal-head.h:75
unsigned b_jlist
Definition: journal-head.h:32
char * b_frozen_data
Definition: journal-head.h:45
int32_t INT_PTR
Definition: typedefs.h:64
#define READ(_gif, _buf, _len)
Definition: ungif.c:107
int ret
static int repeat
Definition: xmllint.c:137