[JFFS2] Disable summary after wbuf recovery
[pandora-kernel.git] / fs / jffs2 / wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13  *
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22 #include <linux/sched.h>
23
24 #include "nodelist.h"
25
26 /* For testing write failures */
27 #undef BREAKME
28 #undef BREAKMEHEADER
29
30 #ifdef BREAKME
31 static unsigned char *brokenbuf;
32 #endif
33
34 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
35 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
36
37 /* max. erase failures before we mark a block bad */
38 #define MAX_ERASE_FAILURES      2
39
/* One entry in the singly-linked list of inodes which have data sitting
   in the write-buffer.  ino 0 is used for writes not tied to a specific
   inode (see jffs2_wbuf_pending_for_ino()). */
struct jffs2_inodirty {
	uint32_t ino;			/* inode number with pending wbuf data */
	struct jffs2_inodirty *next;	/* next entry, or NULL at end of list */
};

/* Sentinel: c->wbuf_inodes is pointed here when allocating a list entry
   failed, meaning "consider _every_ inode dirty" until the list is
   cleared.  Never kfree()d — it is statically allocated. */
static struct jffs2_inodirty inodirty_nomem;
46
47 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48 {
49         struct jffs2_inodirty *this = c->wbuf_inodes;
50
51         /* If a malloc failed, consider _everything_ dirty */
52         if (this == &inodirty_nomem)
53                 return 1;
54
55         /* If ino == 0, _any_ non-GC writes mean 'yes' */
56         if (this && !ino)
57                 return 1;
58
59         /* Look to see if the inode in question is pending in the wbuf */
60         while (this) {
61                 if (this->ino == ino)
62                         return 1;
63                 this = this->next;
64         }
65         return 0;
66 }
67
68 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69 {
70         struct jffs2_inodirty *this;
71
72         this = c->wbuf_inodes;
73
74         if (this != &inodirty_nomem) {
75                 while (this) {
76                         struct jffs2_inodirty *next = this->next;
77                         kfree(this);
78                         this = next;
79                 }
80         }
81         c->wbuf_inodes = NULL;
82 }
83
84 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85 {
86         struct jffs2_inodirty *new;
87
88         /* Mark the superblock dirty so that kupdated will flush... */
89         jffs2_erase_pending_trigger(c);
90
91         if (jffs2_wbuf_pending_for_ino(c, ino))
92                 return;
93
94         new = kmalloc(sizeof(*new), GFP_KERNEL);
95         if (!new) {
96                 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
97                 jffs2_clear_wbuf_ino_list(c);
98                 c->wbuf_inodes = &inodirty_nomem;
99                 return;
100         }
101         new->ino = ino;
102         new->next = c->wbuf_inodes;
103         c->wbuf_inodes = new;
104         return;
105 }
106
/* Move blocks which were parked on erasable_pending_wbuf_list (waiting
   for a wbuf flush, which has now happened) on to a list where they can
   be erased or reused.  Called with the erase_completion_lock held
   (see __jffs2_flush_wbuf()). */
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	/* Rolling counter, only used to vary which destination list a block
	   lands on; unsynchronised, but a race here is harmless. */
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		/* 127 times out of 128, take the erase_pending path below */
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
135
136 #define REFILE_NOTEMPTY 0
137 #define REFILE_ANYWAY   1
138
/* Take a block whose write failed off the active lists and refile it:
   to bad_used_list if it still holds nodes, or to erase_pending_list if
   it is empty (only legal when @allow_empty == REFILE_ANYWAY).  Any
   remaining free space is converted into a single obsolete ref and
   accounted as wasted.  Called with the erase_completion_lock held
   (see jffs2_wbuf_recover()). */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	/* Swallow the block's remaining free space with an obsolete ref,
	   then move that space from the dirty to the wasted accounts
	   (jffs2_link_node_ref counted it as dirty). */
	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb, 
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
177
/* Locate the in-core pointer (f->metadata->raw, a fragtree full_dnode's
   ->raw, or a full_dirent's ->raw) that currently references @raw, so
   the wbuf-recovery code can repoint it at the recovered copy.  @node is
   the node's on-flash data, reread from flash or taken from the wbuf.
   Returns NULL for node types that have no tracked in-core reference. */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* Magic must be JFFS2 (0x1985) or zero (cleanmarker-ish/empty) */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		/* The dirent _must_ be somewhere on the inode's list */
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
224
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Refiles the failing eraseblock, rereads whatever was already on
 * flash, reserves space in a fresh block, rewrites the affected data
 * there, and repoints all raw_node_refs (and in-core structures) at
 * the new copies.  Summary collection for the new block is disabled,
 * since the recovered nodes never went through it.
 * Called from __jffs2_flush_wbuf() after a failed write; alloc_sem and
 * wbuf_sem are held by that path. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	/* The eraseblock the failed wbuf flush was targeting */
	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	/* Refile the block.  It may only legitimately be empty if the wbuf
	   sat at the very start of an eraseblock. */
	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	/* jffs2_block_refile() just appended an obsolete ref covering the
	   block's free space, so last_node must be obsolete. */
	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) || 
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		c->wbuf_len = 0;
		return;
	}

	/* Flash range to be rewritten, and how many refs it spans.
	   'end' stops at last_node, the obsolete free-space ref. */
	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
			/* Drop the partially-written first node (its data is
			   gone) and any obsolete refs that follow it; recover
			   only what is complete in the wbuf. */
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* Target address in the newly reserved block */
	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		/* Write only whole wbuf pages; the tail stays in the wbuf */
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write(c->mtd, ofs, towrite, &retlen,
				      brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
					    rewrite_buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs));

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list
			   (the list is terminated by a pointer back to ic) */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		/* Create the replacement ref in the new block */
		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		/* Obsolete the old ref and move its space from used to dirty */
		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size  -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));

}
521
522 /* Meaning of pad argument:
523    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
524    1: Pad, do not adjust nextblock free_size
525    2: Pad, adjust nextblock free_size
526 */
527 #define NOPAD           0
528 #define PAD_NOACCOUNT   1
529 #define PAD_ACCOUNTING  2
530
/* Flush the write-buffer to flash, optionally padding the remainder of
   the page (see the NOPAD/PAD_* meanings above).  Callers hold
   alloc_sem (asserted below) and wbuf_sem.  On a write failure this
   invokes jffs2_wbuf_recover() and returns the error, possibly leaving
   fresh data in the wbuf — hence callers' retry loops.  Returns 0 on
   success or a negative errno. */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	/* alloc_sem must already be held; a successful trylock proves it wasn't */
	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If there's room for one, write a padding-node header so the
		   scan code can skip the padded area cleanly */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			      brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
				retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, waste);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the wbuf to all-0xff (erased-flash state) for the next page */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
646
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding.
   Returns 0 if nothing was pending or the flush succeeded, otherwise
   the error from the flush/GC pass. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	/* No write-buffer at all (not a write-buffered device) */
	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	/* Snapshot so we can tell below whether GC moved the wbuf on */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* Drop alloc_sem across the GC pass — GC takes it itself */
		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}
710
711 /* Pad write-buffer to end and write it, wasting space. */
712 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
713 {
714         int ret;
715
716         if (!c->wbuf)
717                 return 0;
718
719         down_write(&c->wbuf_sem);
720         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
721         /* retry - maybe wbuf recover left some data in wbuf. */
722         if (ret)
723                 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
724         up_write(&c->wbuf_sem);
725
726         return ret;
727 }
728
729 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
730                               size_t len)
731 {
732         if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
733                 return 0;
734
735         if (len > (c->wbuf_pagesize - c->wbuf_len))
736                 len = c->wbuf_pagesize - c->wbuf_len;
737         memcpy(c->wbuf + c->wbuf_len, buf, len);
738         c->wbuf_len += (uint32_t) len;
739         return len;
740 }
741
742 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
743                        unsigned long count, loff_t to, size_t *retlen,
744                        uint32_t ino)
745 {
746         struct jffs2_eraseblock *jeb;
747         size_t wbuf_retlen, donelen = 0;
748         uint32_t outvec_to = to;
749         int ret, invec;
750
751         /* If not writebuffered flash, don't bother */
752         if (!jffs2_is_writebuffered(c))
753                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
754
755         down_write(&c->wbuf_sem);
756
757         /* If wbuf_ofs is not initialized, set it to target address */
758         if (c->wbuf_ofs == 0xFFFFFFFF) {
759                 c->wbuf_ofs = PAGE_DIV(to);
760                 c->wbuf_len = PAGE_MOD(to);
761                 memset(c->wbuf,0xff,c->wbuf_pagesize);
762         }
763
764         /*
765          * Sanity checks on target address.  It's permitted to write
766          * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
767          * write at the beginning of a new erase block. Anything else,
768          * and you die.  New block starts at xxx000c (0-b = block
769          * header)
770          */
771         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
772                 /* It's a write to a new block */
773                 if (c->wbuf_len) {
774                         D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
775                                   "causes flush of wbuf at 0x%08x\n",
776                                   (unsigned long)to, c->wbuf_ofs));
777                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
778                         if (ret)
779                                 goto outerr;
780                 }
781                 /* set pointer to new block */
782                 c->wbuf_ofs = PAGE_DIV(to);
783                 c->wbuf_len = PAGE_MOD(to);
784         }
785
786         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
787                 /* We're not writing immediately after the writebuffer. Bad. */
788                 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
789                        "to %08lx\n", (unsigned long)to);
790                 if (c->wbuf_len)
791                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
792                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
793                 BUG();
794         }
795
796         /* adjust alignment offset */
797         if (c->wbuf_len != PAGE_MOD(to)) {
798                 c->wbuf_len = PAGE_MOD(to);
799                 /* take care of alignment to next page */
800                 if (!c->wbuf_len) {
801                         c->wbuf_len = c->wbuf_pagesize;
802                         ret = __jffs2_flush_wbuf(c, NOPAD);
803                         if (ret)
804                                 goto outerr;
805                 }
806         }
807
808         for (invec = 0; invec < count; invec++) {
809                 int vlen = invecs[invec].iov_len;
810                 uint8_t *v = invecs[invec].iov_base;
811
812                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
813
814                 if (c->wbuf_len == c->wbuf_pagesize) {
815                         ret = __jffs2_flush_wbuf(c, NOPAD);
816                         if (ret)
817                                 goto outerr;
818                 }
819                 vlen -= wbuf_retlen;
820                 outvec_to += wbuf_retlen;
821                 donelen += wbuf_retlen;
822                 v += wbuf_retlen;
823
824                 if (vlen >= c->wbuf_pagesize) {
825                         ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
826                                             &wbuf_retlen, v);
827                         if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
828                                 goto outfile;
829
830                         vlen -= wbuf_retlen;
831                         outvec_to += wbuf_retlen;
832                         c->wbuf_ofs = outvec_to;
833                         donelen += wbuf_retlen;
834                         v += wbuf_retlen;
835                 }
836
837                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
838                 if (c->wbuf_len == c->wbuf_pagesize) {
839                         ret = __jffs2_flush_wbuf(c, NOPAD);
840                         if (ret)
841                                 goto outerr;
842                 }
843
844                 outvec_to += wbuf_retlen;
845                 donelen += wbuf_retlen;
846         }
847
848         /*
849          * If there's a remainder in the wbuf and it's a non-GC write,
850          * remember that the wbuf affects this ino
851          */
852         *retlen = donelen;
853
854         if (jffs2_sum_active()) {
855                 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
856                 if (res)
857                         return res;
858         }
859
860         if (c->wbuf_len && ino)
861                 jffs2_wbuf_dirties_inode(c, ino);
862
863         ret = 0;
864         up_write(&c->wbuf_sem);
865         return ret;
866
867 outfile:
868         /*
869          * At this point we have no problem, c->wbuf is empty. However
870          * refile nextblock to avoid writing again to same address.
871          */
872
873         spin_lock(&c->erase_completion_lock);
874
875         jeb = &c->blocks[outvec_to / c->sector_size];
876         jffs2_block_refile(c, jeb, REFILE_ANYWAY);
877
878         spin_unlock(&c->erase_completion_lock);
879
880 outerr:
881         *retlen = 0;
882         up_write(&c->wbuf_sem);
883         return ret;
884 }
885
886 /*
887  *      This is the entry for flash write.
888  *      Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
889 */
890 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
891                       size_t *retlen, const u_char *buf)
892 {
893         struct kvec vecs[1];
894
895         if (!jffs2_is_writebuffered(c))
896                 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
897
898         vecs[0].iov_base = (unsigned char *) buf;
899         vecs[0].iov_len = len;
900         return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
901 }
902
903 /*
904         Handle readback from writebuffer and ECC failure return
905 */
906 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
907 {
908         loff_t  orbf = 0, owbf = 0, lwbf = 0;
909         int     ret;
910
911         if (!jffs2_is_writebuffered(c))
912                 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
913
914         /* Read flash */
915         down_read(&c->wbuf_sem);
916         ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
917
918         if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
919                 if (ret == -EBADMSG)
920                         printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
921                                " returned ECC error\n", len, ofs);
922                 /*
923                  * We have the raw data without ECC correction in the buffer,
924                  * maybe we are lucky and all data or parts are correct. We
925                  * check the node.  If data are corrupted node check will sort
926                  * it out.  We keep this block, it will fail on write or erase
927                  * and the we mark it bad. Or should we do that now? But we
928                  * should give him a chance.  Maybe we had a system crash or
929                  * power loss before the ecc write or a erase was completed.
930                  * So we return success. :)
931                  */
932                 ret = 0;
933         }
934
935         /* if no writebuffer available or write buffer empty, return */
936         if (!c->wbuf_pagesize || !c->wbuf_len)
937                 goto exit;
938
939         /* if we read in a different block, return */
940         if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
941                 goto exit;
942
943         if (ofs >= c->wbuf_ofs) {
944                 owbf = (ofs - c->wbuf_ofs);     /* offset in write buffer */
945                 if (owbf > c->wbuf_len)         /* is read beyond write buffer ? */
946                         goto exit;
947                 lwbf = c->wbuf_len - owbf;      /* number of bytes to copy */
948                 if (lwbf > len)
949                         lwbf = len;
950         } else {
951                 orbf = (c->wbuf_ofs - ofs);     /* offset in read buffer */
952                 if (orbf > len)                 /* is write beyond write buffer ? */
953                         goto exit;
954                 lwbf = len - orbf;              /* number of bytes to copy */
955                 if (lwbf > c->wbuf_len)
956                         lwbf = c->wbuf_len;
957         }
958         if (lwbf > 0)
959                 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
960
961 exit:
962         up_read(&c->wbuf_sem);
963         return ret;
964 }
965
/* Number of pages' worth of OOB data read in one go when scanning
   an eraseblock's out-of-band area (see jffs2_check_oob_empty()). */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 12 bytes for OOB clean marker */
#define OOB_CM_SIZE 12

/* Cleanmarker node image as stored in OOB, in on-flash endianness.
   NOTE(review): totlen is 8 — presumably matching the in-band
   cleanmarker size; confirm against the scan/erase code. */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = cpu_to_je32(8)
};
977
978 /*
979  * Check, if the out of band area is empty. This function knows about the clean
980  * marker and if it is present in OOB, treats the OOB as empty anyway.
981  */
982 int jffs2_check_oob_empty(struct jffs2_sb_info *c,
983                           struct jffs2_eraseblock *jeb, int mode)
984 {
985         int i, ret;
986         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
987         struct mtd_oob_ops ops;
988
989         ops.mode = MTD_OOB_AUTO;
990         ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
991         ops.oobbuf = c->oobbuf;
992         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
993         ops.datbuf = NULL;
994
995         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
996         if (ret || ops.oobretlen != ops.ooblen) {
997                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
998                                 " bytes, read %zd bytes, error %d\n",
999                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1000                 if (!ret)
1001                         ret = -EIO;
1002                 return ret;
1003         }
1004
1005         for(i = 0; i < ops.ooblen; i++) {
1006                 if (mode && i < cmlen)
1007                         /* Yeah, we know about the cleanmarker */
1008                         continue;
1009
1010                 if (ops.oobbuf[i] != 0xFF) {
1011                         D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
1012                                   "%08x\n", ops.oobbuf[i], i, jeb->offset));
1013                         return 1;
1014                 }
1015         }
1016
1017         return 0;
1018 }
1019
1020 /*
1021  * Check for a valid cleanmarker.
1022  * Returns: 0 if a valid cleanmarker was found
1023  *          1 if no cleanmarker was found
1024  *          negative error code if an error occurred
1025  */
1026 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1027                                  struct jffs2_eraseblock *jeb)
1028 {
1029         struct mtd_oob_ops ops;
1030         int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1031
1032         ops.mode = MTD_OOB_AUTO;
1033         ops.ooblen = cmlen;
1034         ops.oobbuf = c->oobbuf;
1035         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1036         ops.datbuf = NULL;
1037
1038         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1039         if (ret || ops.oobretlen != ops.ooblen) {
1040                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1041                                 " bytes, read %zd bytes, error %d\n",
1042                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1043                 if (!ret)
1044                         ret = -EIO;
1045                 return ret;
1046         }
1047
1048         return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1049 }
1050
1051 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1052                                  struct jffs2_eraseblock *jeb)
1053 {
1054         int ret;
1055         struct mtd_oob_ops ops;
1056         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1057
1058         ops.mode = MTD_OOB_AUTO;
1059         ops.ooblen = cmlen;
1060         ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1061         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1062         ops.datbuf = NULL;
1063
1064         ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
1065         if (ret || ops.oobretlen != ops.ooblen) {
1066                 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
1067                                 " bytes, read %zd bytes, error %d\n",
1068                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1069                 if (!ret)
1070                         ret = -EIO;
1071                 return ret;
1072         }
1073
1074         return 0;
1075 }
1076
1077 /*
1078  * On NAND we try to mark this block bad. If the block was erased more
1079  * than MAX_ERASE_FAILURES we mark it finaly bad.
1080  * Don't care about failures. This block remains on the erase-pending
1081  * or badblock list as long as nobody manipulates the flash with
1082  * a bootloader or something like that.
1083  */
1084
1085 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1086 {
1087         int     ret;
1088
1089         /* if the count is < max, we try to write the counter to the 2nd page oob area */
1090         if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1091                 return 0;
1092
1093         if (!c->mtd->block_markbad)
1094                 return 1; // What else can we do?
1095
1096         printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
1097         ret = c->mtd->block_markbad(c->mtd, bad_offset);
1098
1099         if (ret) {
1100                 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1101                 return ret;
1102         }
1103         return 1;
1104 }
1105
1106 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1107 {
1108         struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1109
1110         if (!c->mtd->oobsize)
1111                 return 0;
1112
1113         /* Cleanmarker is out-of-band, so inline size zero */
1114         c->cleanmarker_size = 0;
1115
1116         if (!oinfo || oinfo->oobavail == 0) {
1117                 printk(KERN_ERR "inconsistent device description\n");
1118                 return -EINVAL;
1119         }
1120
1121         D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
1122
1123         c->oobavail = oinfo->oobavail;
1124
1125         /* Initialise write buffer */
1126         init_rwsem(&c->wbuf_sem);
1127         c->wbuf_pagesize = c->mtd->writesize;
1128         c->wbuf_ofs = 0xFFFFFFFF;
1129
1130         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1131         if (!c->wbuf)
1132                 return -ENOMEM;
1133
1134         c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1135         if (!c->oobbuf) {
1136                 kfree(c->wbuf);
1137                 return -ENOMEM;
1138         }
1139
1140         return 0;
1141 }
1142
1143 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1144 {
1145         kfree(c->wbuf);
1146         kfree(c->oobbuf);
1147 }
1148
1149 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1150         c->cleanmarker_size = 0;                /* No cleanmarkers needed */
1151
1152         /* Initialize write buffer */
1153         init_rwsem(&c->wbuf_sem);
1154
1155
1156         c->wbuf_pagesize =  c->mtd->erasesize;
1157
1158         /* Find a suitable c->sector_size
1159          * - Not too much sectors
1160          * - Sectors have to be at least 4 K + some bytes
1161          * - All known dataflashes have erase sizes of 528 or 1056
1162          * - we take at least 8 eraseblocks and want to have at least 8K size
1163          * - The concatenation should be a power of 2
1164         */
1165
1166         c->sector_size = 8 * c->mtd->erasesize;
1167
1168         while (c->sector_size < 8192) {
1169                 c->sector_size *= 2;
1170         }
1171
1172         /* It may be necessary to adjust the flash size */
1173         c->flash_size = c->mtd->size;
1174
1175         if ((c->flash_size % c->sector_size) != 0) {
1176                 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1177                 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1178         };
1179
1180         c->wbuf_ofs = 0xFFFFFFFF;
1181         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1182         if (!c->wbuf)
1183                 return -ENOMEM;
1184
1185         printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1186
1187         return 0;
1188 }
1189
1190 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1191         kfree(c->wbuf);
1192 }
1193
1194 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1195         /* Cleanmarker currently occupies whole programming regions,
1196          * either one or 2 for 8Byte STMicro flashes. */
1197         c->cleanmarker_size = max(16u, c->mtd->writesize);
1198
1199         /* Initialize write buffer */
1200         init_rwsem(&c->wbuf_sem);
1201         c->wbuf_pagesize = c->mtd->writesize;
1202         c->wbuf_ofs = 0xFFFFFFFF;
1203
1204         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1205         if (!c->wbuf)
1206                 return -ENOMEM;
1207
1208         return 0;
1209 }
1210
1211 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1212         kfree(c->wbuf);
1213 }