Linux 2.6.21
[pandora-kernel.git] / fs / jffs2 / wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13  *
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22 #include <linux/sched.h>
23
24 #include "nodelist.h"
25
/* For testing write failures: when defined, deliberately feed bogus data
   to the MTD write path so the recovery code can be exercised. */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

/* Round an address down to the start of its wbuf page, or take the offset
   within that page. NOTE: both macros implicitly reference a local variable
   'c' (struct jffs2_sb_info *) in the calling function. */
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES      2
/*
 * One entry in the singly-linked list (c->wbuf_inodes) of inode numbers
 * that have data pending in the write buffer.
 */
struct jffs2_inodirty {
        uint32_t ino;                   /* inode number with pending wbuf data */
        struct jffs2_inodirty *next;    /* next list entry */
};

/* Sentinel: c->wbuf_inodes is pointed here when a list allocation failed,
   meaning "consider _every_ inode dirty". It is only ever compared by
   address, never dereferenced or kfree()d. */
static struct jffs2_inodirty inodirty_nomem;
46
47 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48 {
49         struct jffs2_inodirty *this = c->wbuf_inodes;
50
51         /* If a malloc failed, consider _everything_ dirty */
52         if (this == &inodirty_nomem)
53                 return 1;
54
55         /* If ino == 0, _any_ non-GC writes mean 'yes' */
56         if (this && !ino)
57                 return 1;
58
59         /* Look to see if the inode in question is pending in the wbuf */
60         while (this) {
61                 if (this->ino == ino)
62                         return 1;
63                 this = this->next;
64         }
65         return 0;
66 }
67
68 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69 {
70         struct jffs2_inodirty *this;
71
72         this = c->wbuf_inodes;
73
74         if (this != &inodirty_nomem) {
75                 while (this) {
76                         struct jffs2_inodirty *next = this->next;
77                         kfree(this);
78                         this = next;
79                 }
80         }
81         c->wbuf_inodes = NULL;
82 }
83
84 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85 {
86         struct jffs2_inodirty *new;
87
88         /* Mark the superblock dirty so that kupdated will flush... */
89         jffs2_erase_pending_trigger(c);
90
91         if (jffs2_wbuf_pending_for_ino(c, ino))
92                 return;
93
94         new = kmalloc(sizeof(*new), GFP_KERNEL);
95         if (!new) {
96                 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
97                 jffs2_clear_wbuf_ino_list(c);
98                 c->wbuf_inodes = &inodirty_nomem;
99                 return;
100         }
101         new->ino = ino;
102         new->next = c->wbuf_inodes;
103         c->wbuf_inodes = new;
104         return;
105 }
106
107 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
108 {
109         struct list_head *this, *next;
110         static int n;
111
112         if (list_empty(&c->erasable_pending_wbuf_list))
113                 return;
114
115         list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
116                 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
117
118                 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
119                 list_del(this);
120                 if ((jiffies + (n++)) & 127) {
121                         /* Most of the time, we just erase it immediately. Otherwise we
122                            spend ages scanning it on mount, etc. */
123                         D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
124                         list_add_tail(&jeb->list, &c->erase_pending_list);
125                         c->nr_erasing_blocks++;
126                         jffs2_erase_pending_trigger(c);
127                 } else {
128                         /* Sometimes, however, we leave it elsewhere so it doesn't get
129                            immediately reused, and we spread the load a bit. */
130                         D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
131                         list_add_tail(&jeb->list, &c->erasable_list);
132                 }
133         }
134 }
135
#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY   1

/*
 * Take @jeb out of service after a write failure: file it on the
 * bad_used_list (or straight to erase_pending_list if it holds no
 * nodes), and swallow its remaining free space with an obsolete ref so
 * the space accounting stays consistent.
 *
 * @allow_empty: REFILE_ANYWAY permits an empty block; REFILE_NOTEMPTY
 * BUGs if the block has no nodes. Called under erase_completion_lock.
 */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
        D1(printk("About to refile bad block at %08x\n", jeb->offset));

        /* File the existing block on the bad_used_list.... */
        if (c->nextblock == jeb)
                c->nextblock = NULL;
        else /* Not sure this should ever happen... need more coffee */
                list_del(&jeb->list);
        if (jeb->first_node) {
                D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
                list_add(&jeb->list, &c->bad_used_list);
        } else {
                BUG_ON(allow_empty == REFILE_NOTEMPTY);
                /* It has to have had some nodes or we couldn't be here */
                D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
                list_add(&jeb->list, &c->erase_pending_list);
                c->nr_erasing_blocks++;
                jffs2_erase_pending_trigger(c);
        }

        /* Mark the block's remaining free space as a single obsolete node,
           then reclassify that space from dirty to wasted. Skipped (leaving
           the space as free) only if the ref preallocation fails. */
        if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
                uint32_t oldfree = jeb->free_size;

                jffs2_link_node_ref(c, jeb,
                                    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
                                    oldfree, NULL);
                /* convert to wasted */
                c->wasted_size += oldfree;
                jeb->wasted_size += oldfree;
                c->dirty_size -= oldfree;
                jeb->dirty_size -= oldfree;
        }

        jffs2_dbg_dump_block_lists_nolock(c);
        jffs2_dbg_acct_sanity_check_nolock(c,jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
177
/*
 * Locate the in-core structure (f->metadata, a full_dnode in the
 * fragtree, or a full_dirent) whose ->raw pointer currently equals
 * @raw, so the caller can repoint it at a freshly written copy.
 * @node is the node as read back from flash/wbuf; only its type and
 * (for inodes) offset are consulted here.
 *
 * Returns the address of the ->raw field to adjust, or NULL for node
 * types that keep no such in-core reference.
 */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
                                                            struct jffs2_inode_info *f,
                                                            struct jffs2_raw_node_ref *raw,
                                                            union jffs2_node_union *node)
{
        struct jffs2_node_frag *frag;
        struct jffs2_full_dirent *fd;

        dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
                    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

        /* Sanity: magic must be the JFFS2 magic (0x1985) or zero
           (presumably a node whose header never hit flash - unconfirmed) */
        BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
               je16_to_cpu(node->u.magic) != 0);

        switch (je16_to_cpu(node->u.nodetype)) {
        case JFFS2_NODETYPE_INODE:
                if (f->metadata && f->metadata->raw == raw) {
                        dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
                        return &f->metadata->raw;
                }
                frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
                BUG_ON(!frag);
                /* Find a frag which refers to the full_dnode we want to modify */
                while (!frag->node || frag->node->raw != raw) {
                        frag = frag_next(frag);
                        BUG_ON(!frag);
                }
                dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
                return &frag->node->raw;

        case JFFS2_NODETYPE_DIRENT:
                /* Walk the directory-entry list for the matching raw ref */
                for (fd = f->dents; fd; fd = fd->next) {
                        if (fd->raw == raw) {
                                dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
                                return &fd->raw;
                        }
                }
                BUG();

        default:
                dbg_noderef("Don't care about replacing raw for nodetype %x\n",
                            je16_to_cpu(node->u.nodetype));
                break;
        }
        return NULL;
}
224
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Strategy: refile the failing eraseblock as bad, re-read whatever was
 * already committed to flash for the affected nodes, reserve space in a
 * fresh block, rewrite the data there, and migrate the raw_node_refs
 * (plus any in-core ->raw pointers) from the old block to the new one.
 * Called with alloc_sem and wbuf_sem held; takes erase_completion_lock
 * internally. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
        struct jffs2_eraseblock *jeb, *new_jeb;
        struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
        size_t retlen;
        int ret;
        int nr_refile = 0;      /* number of refs to migrate to the new block */
        unsigned char *buf;     /* re-read flash data, or NULL if wbuf suffices */
        uint32_t start, end, ofs, len;

        /* The eraseblock the write buffer was targetting */
        jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

        spin_lock(&c->erase_completion_lock);
        if (c->wbuf_ofs % c->mtd->erasesize)
                jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
        else
                jffs2_block_refile(c, jeb, REFILE_ANYWAY);
        spin_unlock(&c->erase_completion_lock);

        BUG_ON(!ref_obsolete(jeb->last_node));

        /* Find the first node to be recovered, by skipping over every
           node which ends before the wbuf starts, or which is obsolete. */
        for (next = raw = jeb->first_node; next; raw = next) {
                next = ref_next(raw);

                if (ref_obsolete(raw) ||
                    (next && ref_offset(next) <= c->wbuf_ofs)) {
                        dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
                                    ref_offset(raw), ref_flags(raw),
                                    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
                                    c->wbuf_ofs);
                        continue;
                }
                dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
                            ref_offset(raw), ref_flags(raw),
                            (ref_offset(raw) + ref_totlen(c, jeb, raw)));

                first_raw = raw;
                break;
        }

        if (!first_raw) {
                /* All nodes were obsolete. Nothing to recover. */
                D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
                c->wbuf_len = 0;
                return;
        }

        start = ref_offset(first_raw);
        end = ref_offset(jeb->last_node);
        nr_refile = 1;

        /* Count the number of refs which need to be copied */
        while ((raw = ref_next(raw)) != jeb->last_node)
                nr_refile++;

        dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
                    start, end, end - start, nr_refile);

        buf = NULL;
        if (start < c->wbuf_ofs) {
                /* First affected node was already partially written.
                 * Attempt to reread the old data into our buffer. */

                buf = kmalloc(end - start, GFP_KERNEL);
                if (!buf) {
                        printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

                        goto read_failed;
                }

                /* Do the read... */
                ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

                /* ECC recovered ? */
                if ((ret == -EUCLEAN || ret == -EBADMSG) &&
                    (retlen == c->wbuf_ofs - start))
                        ret = 0;

                if (ret || retlen != c->wbuf_ofs - start) {
                        printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

                        kfree(buf);
                        buf = NULL;
                read_failed:
                        /* Give up on the partially-written first node (and
                           any obsolete ones following it); only nodes fully
                           contained in the wbuf can still be recovered. */
                        first_raw = ref_next(first_raw);
                        nr_refile--;
                        while (first_raw && ref_obsolete(first_raw)) {
                                first_raw = ref_next(first_raw);
                                nr_refile--;
                        }

                        /* If this was the only node to be recovered, give up */
                        if (!first_raw) {
                                c->wbuf_len = 0;
                                return;
                        }

                        /* It wasn't. Go on and try to recover nodes complete in the wbuf */
                        start = ref_offset(first_raw);
                        dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
                                    start, end, end - start, nr_refile);

                } else {
                        /* Read succeeded. Copy the remaining data from the wbuf */
                        memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
                }
        }
        /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
           Either 'buf' contains the data, or we find it in the wbuf */

        /* ... and get an allocation of space from a shiny new block instead */
        ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
        if (ret) {
                printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
                kfree(buf);
                return;
        }

        ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
        if (ret) {
                printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
                kfree(buf);
                return;
        }

        ofs = write_ofs(c);

        if (end-start >= c->wbuf_pagesize) {
                /* Need to do another write immediately, but it's possible
                   that this is just because the wbuf itself is completely
                   full, and there's nothing earlier read back from the
                   flash. Hence 'buf' isn't necessarily what we're writing
                   from. */
                unsigned char *rewrite_buf = buf?:c->wbuf;
                uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

                D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
                          towrite, ofs));

#ifdef BREAKMEHEADER
                static int breakme;
                if (breakme++ == 20) {
                        printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
                        breakme = 0;
                        c->mtd->write(c->mtd, ofs, towrite, &retlen,
                                      brokenbuf);
                        ret = -EIO;
                } else
#endif
                        ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
                                            rewrite_buf);

                if (ret || retlen != towrite) {
                        /* Argh. We tried. Really we did. */
                        printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
                        kfree(buf);

                        if (retlen)
                                jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

                        return;
                }
                printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

                /* Whatever didn't fill a whole page stays in the wbuf */
                c->wbuf_len = (end - start) - towrite;
                c->wbuf_ofs = ofs + towrite;
                memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
                /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
        } else {
                /* OK, now we're left with the dregs in whichever buffer we're using */
                if (buf) {
                        memcpy(c->wbuf, buf, end-start);
                } else {
                        /* Source and destination overlap within the wbuf */
                        memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
                }
                c->wbuf_ofs = ofs;
                c->wbuf_len = end - start;
        }

        /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
        new_jeb = &c->blocks[ofs / c->sector_size];

        spin_lock(&c->erase_completion_lock);
        for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
                uint32_t rawlen = ref_totlen(c, jeb, raw);
                struct jffs2_inode_cache *ic;
                struct jffs2_raw_node_ref *new_ref;
                struct jffs2_raw_node_ref **adjust_ref = NULL;
                struct jffs2_inode_info *f = NULL;

                D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
                          rawlen, ref_offset(raw), ref_flags(raw), ofs));

                ic = jffs2_raw_ref_to_ic(raw);

                /* Ick. This XATTR mess should be fixed shortly... */
                if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
                        struct jffs2_xattr_datum *xd = (void *)ic;
                        BUG_ON(xd->node != raw);
                        adjust_ref = &xd->node;
                        raw->next_in_ino = NULL;
                        ic = NULL;
                } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
                        struct jffs2_xattr_datum *xr = (void *)ic;
                        BUG_ON(xr->node != raw);
                        adjust_ref = &xr->node;
                        raw->next_in_ino = NULL;
                        ic = NULL;
                } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
                        struct jffs2_raw_node_ref **p = &ic->nodes;

                        /* Remove the old node from the per-inode list
                           (the list is terminated by a pointer back to ic) */
                        while (*p && *p != (void *)ic) {
                                if (*p == raw) {
                                        (*p) = (raw->next_in_ino);
                                        raw->next_in_ino = NULL;
                                        break;
                                }
                                p = &((*p)->next_in_ino);
                        }

                        if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
                                /* If it's an in-core inode, then we have to adjust any
                                   full_dirent or full_dnode structure to point to the
                                   new version instead of the old */
                                f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
                                if (IS_ERR(f)) {
                                        /* Should never happen; it _must_ be present */
                                        JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
                                                    ic->ino, PTR_ERR(f));
                                        BUG();
                                }
                                /* We don't lock f->sem. There's a number of ways we could
                                   end up in here with it already being locked, and nobody's
                                   going to modify it on us anyway because we hold the
                                   alloc_sem. We're only changing one ->raw pointer too,
                                   which we can get away with without upsetting readers. */
                                adjust_ref = jffs2_incore_replace_raw(c, f, raw,
                                                                      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
                        } else if (unlikely(ic->state != INO_STATE_PRESENT &&
                                            ic->state != INO_STATE_CHECKEDABSENT &&
                                            ic->state != INO_STATE_GC)) {
                                JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
                                BUG();
                        }
                }

                /* Create the replacement ref in the new block... */
                new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

                if (adjust_ref) {
                        BUG_ON(*adjust_ref != raw);
                        *adjust_ref = new_ref;
                }
                if (f)
                        jffs2_gc_release_inode(c, f);

                /* ...and obsolete the old copy, moving its bytes from
                   used to dirty in the accounting */
                if (!ref_obsolete(raw)) {
                        jeb->dirty_size += rawlen;
                        jeb->used_size  -= rawlen;
                        c->dirty_size += rawlen;
                        c->used_size -= rawlen;
                        raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
                        BUG_ON(raw->next_in_ino);
                }
                ofs += rawlen;
        }

        kfree(buf);

        /* Fix up the original jeb now it's on the bad_list */
        if (first_raw == jeb->first_node) {
                D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
                list_move(&jeb->list, &c->erase_pending_list);
                c->nr_erasing_blocks++;
                jffs2_erase_pending_trigger(c);
        }

        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

        spin_unlock(&c->erase_completion_lock);

        D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));

}
518
/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD           0
#define PAD_NOACCOUNT   1
#define PAD_ACCOUNTING  2

/*
 * Write the current contents of the write buffer out to flash as one
 * full page, optionally padding the tail with a JFFS2 padding node.
 *
 * Caller must hold alloc_sem (asserted below) and wbuf_sem. Returns 0
 * on success or a negative error; on a write failure the buffer is
 * handed to jffs2_wbuf_recover(), which may leave fresh data in the
 * wbuf - callers typically retry once after an error.
 */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
        struct jffs2_eraseblock *wbuf_jeb;
        int ret;
        size_t retlen;

        /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
           del_timer() the timer we never initialised. */
        if (!jffs2_is_writebuffered(c))
                return 0;

        /* Sanity check: the caller must already hold alloc_sem */
        if (!down_trylock(&c->alloc_sem)) {
                up(&c->alloc_sem);
                printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
                BUG();
        }

        if (!c->wbuf_len)       /* already checked c->wbuf above */
                return 0;

        wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
        if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
                return -ENOMEM;

        /* claim remaining space on the page
           this happens, if we have a change to a new block,
           or if fsync forces us to flush the writebuffer.
           if we have a switch to next page, we will not have
           enough remaining space for this.
        */
        if (pad ) {
                c->wbuf_len = PAD(c->wbuf_len);

                /* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
                   with 8 byte page size */
                memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

                /* If a full node header fits, write a proper padding node
                   so the scanner can skip this region cleanly on mount */
                if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
                        struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
                        padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
                        padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
                        padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
                        padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
                }
        }
        /* else jffs2_flash_writev has actually filled in the rest of the
           buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
        static int breakme;
        if (breakme++ == 20) {
                printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
                breakme = 0;
                c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
                              brokenbuf);
                ret = -EIO;
        } else
#endif

                ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

        if (ret || retlen != c->wbuf_pagesize) {
                if (ret)
                        printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
                else {
                        printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
                                retlen, c->wbuf_pagesize);
                        ret = -EIO;
                }

                jffs2_wbuf_recover(c);

                return ret;
        }

        /* Adjust free size of the block if we padded. */
        if (pad) {
                uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

                D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
                          (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

                /* wbuf_pagesize - wbuf_len is the amount of space that's to be
                   padded. If there is less free space in the block than that,
                   something screwed up */
                if (wbuf_jeb->free_size < waste) {
                        printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
                               c->wbuf_ofs, c->wbuf_len, waste);
                        printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
                               wbuf_jeb->offset, wbuf_jeb->free_size);
                        BUG();
                }

                spin_lock(&c->erase_completion_lock);

                /* Account the padding as an obsolete node, then move it
                   from dirty to wasted space */
                jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
                /* FIXME: that made it count as dirty. Convert to wasted */
                wbuf_jeb->dirty_size -= waste;
                c->dirty_size -= waste;
                wbuf_jeb->wasted_size += waste;
                c->wasted_size += waste;
        } else
                spin_lock(&c->erase_completion_lock);

        /* Stick any now-obsoleted blocks on the erase_pending_list */
        jffs2_refile_wbuf_blocks(c);
        jffs2_clear_wbuf_ino_list(c);
        spin_unlock(&c->erase_completion_lock);

        memset(c->wbuf,0xff,c->wbuf_pagesize);
        /* adjust write buffer offset, else we get a non contiguous write bug */
        c->wbuf_ofs += c->wbuf_pagesize;
        c->wbuf_len = 0;
        return 0;
}
643
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
        uint32_t old_wbuf_ofs;
        uint32_t old_wbuf_len;
        int ret = 0;

        D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

        /* Not a write-buffered flash; nothing to flush */
        if (!c->wbuf)
                return 0;

        down(&c->alloc_sem);
        if (!jffs2_wbuf_pending_for_ino(c, ino)) {
                D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
                up(&c->alloc_sem);
                return 0;
        }

        /* Snapshot the wbuf position so we can tell when GC has moved it */
        old_wbuf_ofs = c->wbuf_ofs;
        old_wbuf_len = c->wbuf_len;

        if (c->unchecked_size) {
                /* GC won't make any progress for a while; flush with
                   padding instead of garbage-collecting */
                D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
                down_write(&c->wbuf_sem);
                ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                /* retry flushing wbuf in case jffs2_wbuf_recover
                   left some data in the wbuf */
                if (ret)
                        ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                up_write(&c->wbuf_sem);
        } else while (old_wbuf_len &&
                      old_wbuf_ofs == c->wbuf_ofs) {
                /* Run GC passes (dropping alloc_sem around each) until the
                   wbuf has moved past its old position, i.e. been flushed */

                up(&c->alloc_sem);

                D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

                ret = jffs2_garbage_collect_pass(c);
                if (ret) {
                        /* GC failed. Flush it with padding instead */
                        down(&c->alloc_sem);
                        down_write(&c->wbuf_sem);
                        ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                        /* retry flushing wbuf in case jffs2_wbuf_recover
                           left some data in the wbuf */
                        if (ret)
                                ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                        up_write(&c->wbuf_sem);
                        break;
                }
                down(&c->alloc_sem);
        }

        D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

        up(&c->alloc_sem);
        return ret;
}
707
708 /* Pad write-buffer to end and write it, wasting space. */
709 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
710 {
711         int ret;
712
713         if (!c->wbuf)
714                 return 0;
715
716         down_write(&c->wbuf_sem);
717         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
718         /* retry - maybe wbuf recover left some data in wbuf. */
719         if (ret)
720                 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
721         up_write(&c->wbuf_sem);
722
723         return ret;
724 }
725
726 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
727                               size_t len)
728 {
729         if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
730                 return 0;
731
732         if (len > (c->wbuf_pagesize - c->wbuf_len))
733                 len = c->wbuf_pagesize - c->wbuf_len;
734         memcpy(c->wbuf + c->wbuf_len, buf, len);
735         c->wbuf_len += (uint32_t) len;
736         return len;
737 }
738
739 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
740                        unsigned long count, loff_t to, size_t *retlen,
741                        uint32_t ino)
742 {
743         struct jffs2_eraseblock *jeb;
744         size_t wbuf_retlen, donelen = 0;
745         uint32_t outvec_to = to;
746         int ret, invec;
747
748         /* If not writebuffered flash, don't bother */
749         if (!jffs2_is_writebuffered(c))
750                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
751
752         down_write(&c->wbuf_sem);
753
754         /* If wbuf_ofs is not initialized, set it to target address */
755         if (c->wbuf_ofs == 0xFFFFFFFF) {
756                 c->wbuf_ofs = PAGE_DIV(to);
757                 c->wbuf_len = PAGE_MOD(to);
758                 memset(c->wbuf,0xff,c->wbuf_pagesize);
759         }
760
761         /*
762          * Sanity checks on target address.  It's permitted to write
763          * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
764          * write at the beginning of a new erase block. Anything else,
765          * and you die.  New block starts at xxx000c (0-b = block
766          * header)
767          */
768         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
769                 /* It's a write to a new block */
770                 if (c->wbuf_len) {
771                         D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
772                                   "causes flush of wbuf at 0x%08x\n",
773                                   (unsigned long)to, c->wbuf_ofs));
774                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
775                         if (ret)
776                                 goto outerr;
777                 }
778                 /* set pointer to new block */
779                 c->wbuf_ofs = PAGE_DIV(to);
780                 c->wbuf_len = PAGE_MOD(to);
781         }
782
783         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
784                 /* We're not writing immediately after the writebuffer. Bad. */
785                 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
786                        "to %08lx\n", (unsigned long)to);
787                 if (c->wbuf_len)
788                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
789                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
790                 BUG();
791         }
792
793         /* adjust alignment offset */
794         if (c->wbuf_len != PAGE_MOD(to)) {
795                 c->wbuf_len = PAGE_MOD(to);
796                 /* take care of alignment to next page */
797                 if (!c->wbuf_len) {
798                         c->wbuf_len = c->wbuf_pagesize;
799                         ret = __jffs2_flush_wbuf(c, NOPAD);
800                         if (ret)
801                                 goto outerr;
802                 }
803         }
804
805         for (invec = 0; invec < count; invec++) {
806                 int vlen = invecs[invec].iov_len;
807                 uint8_t *v = invecs[invec].iov_base;
808
809                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
810
811                 if (c->wbuf_len == c->wbuf_pagesize) {
812                         ret = __jffs2_flush_wbuf(c, NOPAD);
813                         if (ret)
814                                 goto outerr;
815                 }
816                 vlen -= wbuf_retlen;
817                 outvec_to += wbuf_retlen;
818                 donelen += wbuf_retlen;
819                 v += wbuf_retlen;
820
821                 if (vlen >= c->wbuf_pagesize) {
822                         ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
823                                             &wbuf_retlen, v);
824                         if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
825                                 goto outfile;
826
827                         vlen -= wbuf_retlen;
828                         outvec_to += wbuf_retlen;
829                         c->wbuf_ofs = outvec_to;
830                         donelen += wbuf_retlen;
831                         v += wbuf_retlen;
832                 }
833
834                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
835                 if (c->wbuf_len == c->wbuf_pagesize) {
836                         ret = __jffs2_flush_wbuf(c, NOPAD);
837                         if (ret)
838                                 goto outerr;
839                 }
840
841                 outvec_to += wbuf_retlen;
842                 donelen += wbuf_retlen;
843         }
844
845         /*
846          * If there's a remainder in the wbuf and it's a non-GC write,
847          * remember that the wbuf affects this ino
848          */
849         *retlen = donelen;
850
851         if (jffs2_sum_active()) {
852                 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
853                 if (res)
854                         return res;
855         }
856
857         if (c->wbuf_len && ino)
858                 jffs2_wbuf_dirties_inode(c, ino);
859
860         ret = 0;
861         up_write(&c->wbuf_sem);
862         return ret;
863
864 outfile:
865         /*
866          * At this point we have no problem, c->wbuf is empty. However
867          * refile nextblock to avoid writing again to same address.
868          */
869
870         spin_lock(&c->erase_completion_lock);
871
872         jeb = &c->blocks[outvec_to / c->sector_size];
873         jffs2_block_refile(c, jeb, REFILE_ANYWAY);
874
875         spin_unlock(&c->erase_completion_lock);
876
877 outerr:
878         *retlen = 0;
879         up_write(&c->wbuf_sem);
880         return ret;
881 }
882
883 /*
884  *      This is the entry for flash write.
885  *      Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
886 */
887 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
888                       size_t *retlen, const u_char *buf)
889 {
890         struct kvec vecs[1];
891
892         if (!jffs2_is_writebuffered(c))
893                 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
894
895         vecs[0].iov_base = (unsigned char *) buf;
896         vecs[0].iov_len = len;
897         return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
898 }
899
/*
	Handle readback from writebuffer and ECC failure return

	Reads len bytes at ofs from flash, then overlays any part of the
	requested range that is still pending in the write-buffer, so
	callers see data they have logically written even though it has
	not yet reached the flash.  ECC failures on a full-length read
	are deliberately tolerated -- see the comment below.
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	/* No write-buffer in use: read straight from the device */
	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
			       " returned ECC error\n", len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node.  If data are corrupted node check will sort
		 * it out.  We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance.  Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		/* Read starts at or after the buffered region */
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		/* Read starts before the buffered region; the wbuf data
		   lands at offset orbf within the caller's buffer */
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	/* Overlay the pending write-buffer contents onto the read data */
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
962
/* Number of NAND pages whose OOB area is read in a single scan */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 12 bytes for OOB clean marker */
#define OOB_CM_SIZE 12

/*
 * Template cleanmarker written to (and compared against) the OOB area
 * of erased NAND blocks.  Only the first min(c->oobavail, OOB_CM_SIZE)
 * bytes are used by the check/write routines below; ->totlen is 8 for
 * historical compatibility.
 */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = cpu_to_je32(8)
};
974
975 /*
976  * Check, if the out of band area is empty. This function knows about the clean
977  * marker and if it is present in OOB, treats the OOB as empty anyway.
978  */
979 int jffs2_check_oob_empty(struct jffs2_sb_info *c,
980                           struct jffs2_eraseblock *jeb, int mode)
981 {
982         int i, ret;
983         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
984         struct mtd_oob_ops ops;
985
986         ops.mode = MTD_OOB_AUTO;
987         ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
988         ops.oobbuf = c->oobbuf;
989         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
990         ops.datbuf = NULL;
991
992         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
993         if (ret || ops.oobretlen != ops.ooblen) {
994                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
995                                 " bytes, read %zd bytes, error %d\n",
996                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
997                 if (!ret)
998                         ret = -EIO;
999                 return ret;
1000         }
1001
1002         for(i = 0; i < ops.ooblen; i++) {
1003                 if (mode && i < cmlen)
1004                         /* Yeah, we know about the cleanmarker */
1005                         continue;
1006
1007                 if (ops.oobbuf[i] != 0xFF) {
1008                         D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
1009                                   "%08x\n", ops.oobbuf[i], i, jeb->offset));
1010                         return 1;
1011                 }
1012         }
1013
1014         return 0;
1015 }
1016
1017 /*
1018  * Check for a valid cleanmarker.
1019  * Returns: 0 if a valid cleanmarker was found
1020  *          1 if no cleanmarker was found
1021  *          negative error code if an error occurred
1022  */
1023 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1024                                  struct jffs2_eraseblock *jeb)
1025 {
1026         struct mtd_oob_ops ops;
1027         int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1028
1029         ops.mode = MTD_OOB_AUTO;
1030         ops.ooblen = cmlen;
1031         ops.oobbuf = c->oobbuf;
1032         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1033         ops.datbuf = NULL;
1034
1035         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1036         if (ret || ops.oobretlen != ops.ooblen) {
1037                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1038                                 " bytes, read %zd bytes, error %d\n",
1039                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1040                 if (!ret)
1041                         ret = -EIO;
1042                 return ret;
1043         }
1044
1045         return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1046 }
1047
1048 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1049                                  struct jffs2_eraseblock *jeb)
1050 {
1051         int ret;
1052         struct mtd_oob_ops ops;
1053         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1054
1055         ops.mode = MTD_OOB_AUTO;
1056         ops.ooblen = cmlen;
1057         ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1058         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1059         ops.datbuf = NULL;
1060
1061         ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
1062         if (ret || ops.oobretlen != ops.ooblen) {
1063                 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
1064                                 " bytes, read %zd bytes, error %d\n",
1065                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1066                 if (!ret)
1067                         ret = -EIO;
1068                 return ret;
1069         }
1070
1071         return 0;
1072 }
1073
1074 /*
1075  * On NAND we try to mark this block bad. If the block was erased more
1076  * than MAX_ERASE_FAILURES we mark it finaly bad.
1077  * Don't care about failures. This block remains on the erase-pending
1078  * or badblock list as long as nobody manipulates the flash with
1079  * a bootloader or something like that.
1080  */
1081
1082 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1083 {
1084         int     ret;
1085
1086         /* if the count is < max, we try to write the counter to the 2nd page oob area */
1087         if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1088                 return 0;
1089
1090         if (!c->mtd->block_markbad)
1091                 return 1; // What else can we do?
1092
1093         printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
1094         ret = c->mtd->block_markbad(c->mtd, bad_offset);
1095
1096         if (ret) {
1097                 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1098                 return ret;
1099         }
1100         return 1;
1101 }
1102
/*
 * Set up NAND-specific write-buffering state at mount time.
 *
 * The cleanmarker lives out-of-band (so its in-band size is zero), the
 * write-buffer is one NAND page, and an OOB scratch buffer big enough
 * for NR_OOB_SCAN_PAGES pages is allocated.  Returns 0 on success or
 * a negative error code.
 */
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	struct nand_ecclayout *oinfo = c->mtd->ecclayout;

	/* No OOB area at all: nothing to set up */
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	if (!oinfo || oinfo->oobavail == 0) {
		printk(KERN_ERR "inconsistent device description\n");
		return -EINVAL;
	}

	D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));

	c->oobavail = oinfo->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	/* 0xFFFFFFFF = "uninitialized" sentinel; jffs2_flash_writev()
	   sets the real offset on the first write */
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		/* Don't leak the write buffer on partial failure */
		kfree(c->wbuf);
		return -ENOMEM;
	}

	return 0;
}
1139
1140 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1141 {
1142         kfree(c->wbuf);
1143         kfree(c->oobbuf);
1144 }
1145
1146 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1147         c->cleanmarker_size = 0;                /* No cleanmarkers needed */
1148
1149         /* Initialize write buffer */
1150         init_rwsem(&c->wbuf_sem);
1151
1152
1153         c->wbuf_pagesize =  c->mtd->erasesize;
1154
1155         /* Find a suitable c->sector_size
1156          * - Not too much sectors
1157          * - Sectors have to be at least 4 K + some bytes
1158          * - All known dataflashes have erase sizes of 528 or 1056
1159          * - we take at least 8 eraseblocks and want to have at least 8K size
1160          * - The concatenation should be a power of 2
1161         */
1162
1163         c->sector_size = 8 * c->mtd->erasesize;
1164
1165         while (c->sector_size < 8192) {
1166                 c->sector_size *= 2;
1167         }
1168
1169         /* It may be necessary to adjust the flash size */
1170         c->flash_size = c->mtd->size;
1171
1172         if ((c->flash_size % c->sector_size) != 0) {
1173                 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1174                 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1175         };
1176
1177         c->wbuf_ofs = 0xFFFFFFFF;
1178         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1179         if (!c->wbuf)
1180                 return -ENOMEM;
1181
1182         printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1183
1184         return 0;
1185 }
1186
/* Free the write-buffer allocated by jffs2_dataflash_setup() */
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}
1190
/*
 * Set up write-buffering for NOR flash with a write-buffer/program
 * region constraint.  Returns 0 on success or -ENOMEM.
 */
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	/* 0xFFFFFFFF = "uninitialized"; jffs2_flash_writev() sets the
	   real offset on the first write */
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}
1207
/* Free the write-buffer allocated by jffs2_nor_wbuf_flash_setup() */
void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}