omap2+: add drm device
[pandora-kernel.git] / fs / jffs2 / wbuf.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/crc32.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/jiffies.h>
20 #include <linux/sched.h>
21
22 #include "nodelist.h"
23
24 /* For testing write failures */
25 #undef BREAKME
26 #undef BREAKMEHEADER
27
28 #ifdef BREAKME
29 static unsigned char *brokenbuf;
30 #endif
31
32 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
33 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
34
35 /* max. erase failures before we mark a block bad */
36 #define MAX_ERASE_FAILURES      2
37
/* Singly-linked list entry recording an inode number that has data
   still sitting in the write buffer.  The list hangs off c->wbuf_inodes
   and is consumed by jffs2_wbuf_pending_for_ino() below. */
struct jffs2_inodirty {
        uint32_t ino;                   /* inode number with pending wbuf data */
        struct jffs2_inodirty *next;    /* next entry; NULL terminates the list */
};

/* Static out-of-memory sentinel: c->wbuf_inodes is pointed at this object
   when allocating a real list entry fails, meaning "consider _every_
   inode dirty".  It must never be kfree()d. */
static struct jffs2_inodirty inodirty_nomem;
44
45 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
46 {
47         struct jffs2_inodirty *this = c->wbuf_inodes;
48
49         /* If a malloc failed, consider _everything_ dirty */
50         if (this == &inodirty_nomem)
51                 return 1;
52
53         /* If ino == 0, _any_ non-GC writes mean 'yes' */
54         if (this && !ino)
55                 return 1;
56
57         /* Look to see if the inode in question is pending in the wbuf */
58         while (this) {
59                 if (this->ino == ino)
60                         return 1;
61                 this = this->next;
62         }
63         return 0;
64 }
65
66 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
67 {
68         struct jffs2_inodirty *this;
69
70         this = c->wbuf_inodes;
71
72         if (this != &inodirty_nomem) {
73                 while (this) {
74                         struct jffs2_inodirty *next = this->next;
75                         kfree(this);
76                         this = next;
77                 }
78         }
79         c->wbuf_inodes = NULL;
80 }
81
82 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
83 {
84         struct jffs2_inodirty *new;
85
86         /* Mark the superblock dirty so that kupdated will flush... */
87         jffs2_dirty_trigger(c);
88
89         if (jffs2_wbuf_pending_for_ino(c, ino))
90                 return;
91
92         new = kmalloc(sizeof(*new), GFP_KERNEL);
93         if (!new) {
94                 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
95                 jffs2_clear_wbuf_ino_list(c);
96                 c->wbuf_inodes = &inodirty_nomem;
97                 return;
98         }
99         new->ino = ino;
100         new->next = c->wbuf_inodes;
101         c->wbuf_inodes = new;
102         return;
103 }
104
/* Move eraseblocks whose wbuf-pending data has now reached the flash
 * off erasable_pending_wbuf_list and onto an erase list.  Most blocks
 * go straight to erase_pending_list for immediate erasure; roughly one
 * in 128 (chosen pseudo-randomly from jiffies plus a counter) is parked
 * on erasable_list instead, so freed blocks are not all reused
 * immediately and the wear is spread around.
 * NOTE(review): called under c->erase_completion_lock at the visible
 * call site in __jffs2_flush_wbuf() — list manipulation relies on it. */
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
        struct list_head *this, *next;
        static int n;   /* mixed with jiffies below as a cheap pseudo-random source */

        if (list_empty(&c->erasable_pending_wbuf_list))
                return;

        list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
                struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

                D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
                list_del(this);
                if ((jiffies + (n++)) & 127) {
                        /* Most of the time, we just erase it immediately. Otherwise we
                           spend ages scanning it on mount, etc. */
                        D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
                        list_add_tail(&jeb->list, &c->erase_pending_list);
                        c->nr_erasing_blocks++;
                        jffs2_garbage_collect_trigger(c);
                } else {
                        /* Sometimes, however, we leave it elsewhere so it doesn't get
                           immediately reused, and we spread the load a bit. */
                        D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
                        list_add_tail(&jeb->list, &c->erasable_list);
                }
        }
}
133
/* allow_empty argument to jffs2_block_refile():
   REFILE_NOTEMPTY — the block must contain nodes; BUG() if it is empty.
   REFILE_ANYWAY   — an empty block is acceptable (goes straight to erase). */
#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY   1

/* Take a block on which a write failed out of service: unhook it from
 * c->nextblock (or its current list), file it on bad_used_list if it
 * still holds nodes — or erase_pending_list if empty and allowed — and
 * convert its remaining free space into an obsolete ref accounted as
 * wasted, so nothing further is written there.
 * NOTE(review): called under c->erase_completion_lock at the visible
 * call site in jffs2_wbuf_recover(). */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
        D1(printk("About to refile bad block at %08x\n", jeb->offset));

        /* File the existing block on the bad_used_list.... */
        if (c->nextblock == jeb)
                c->nextblock = NULL;
        else /* Not sure this should ever happen... need more coffee */
                list_del(&jeb->list);
        if (jeb->first_node) {
                D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
                list_add(&jeb->list, &c->bad_used_list);
        } else {
                BUG_ON(allow_empty == REFILE_NOTEMPTY);
                /* It has to have had some nodes or we couldn't be here */
                D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
                list_add(&jeb->list, &c->erase_pending_list);
                c->nr_erasing_blocks++;
                jffs2_garbage_collect_trigger(c);
        }

        if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
                uint32_t oldfree = jeb->free_size;

                /* Cover the whole remaining free space with one obsolete
                   ref so the space is never handed out again. */
                jffs2_link_node_ref(c, jeb, 
                                    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
                                    oldfree, NULL);
                /* convert to wasted */
                c->wasted_size += oldfree;
                jeb->wasted_size += oldfree;
                c->dirty_size -= oldfree;
                jeb->dirty_size -= oldfree;
        }

        jffs2_dbg_dump_block_lists_nolock(c);
        jffs2_dbg_acct_sanity_check_nolock(c,jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
175
/* Find the in-core structure (f->metadata, a full_dnode hanging off the
 * fragtree, or a full_dirent) whose ->raw pointer currently references
 * 'raw', and return the address of that pointer so the caller can
 * repoint it at a relocated node.  'node' points at the on-media node
 * data (only its header and, for inodes, i.offset are consulted).
 * Returns NULL for node types that carry no in-core ->raw pointer.
 * BUGs if an inode/dirent node cannot be matched — the caller only
 * invokes this for nodes that must be present in core. */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
                                                            struct jffs2_inode_info *f,
                                                            struct jffs2_raw_node_ref *raw,
                                                            union jffs2_node_union *node)
{
        struct jffs2_node_frag *frag;
        struct jffs2_full_dirent *fd;

        dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
                    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

        /* Sanity: either a real JFFS2 node (magic 0x1985) or a blank
           (all-zero) header. */
        BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
               je16_to_cpu(node->u.magic) != 0);

        switch (je16_to_cpu(node->u.nodetype)) {
        case JFFS2_NODETYPE_INODE:
                if (f->metadata && f->metadata->raw == raw) {
                        dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
                        return &f->metadata->raw;
                }
                frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
                BUG_ON(!frag);
                /* Find a frag which refers to the full_dnode we want to modify */
                while (!frag->node || frag->node->raw != raw) {
                        frag = frag_next(frag);
                        BUG_ON(!frag);
                }
                dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
                return &frag->node->raw;

        case JFFS2_NODETYPE_DIRENT:
                /* Walk the directory-entry list looking for the ref. */
                for (fd = f->dents; fd; fd = fd->next) {
                        if (fd->raw == raw) {
                                dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
                                return &fd->raw;
                        }
                }
                BUG();

        default:
                dbg_noderef("Don't care about replacing raw for nodetype %x\n",
                            je16_to_cpu(node->u.nodetype));
                break;
        }
        return NULL;
}
222
223 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
224 static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
225                               uint32_t ofs)
226 {
227         int ret;
228         size_t retlen;
229         char *eccstr;
230
231         ret = c->mtd->read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
232         if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
233                 printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
234                 return ret;
235         } else if (retlen != c->wbuf_pagesize) {
236                 printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize);
237                 return -EIO;
238         }
239         if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
240                 return 0;
241
242         if (ret == -EUCLEAN)
243                 eccstr = "corrected";
244         else if (ret == -EBADMSG)
245                 eccstr = "correction failed";
246         else
247                 eccstr = "OK or unused";
248
249         printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n",
250                eccstr, c->wbuf_ofs);
251         print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
252                        c->wbuf, c->wbuf_pagesize, 0);
253
254         printk(KERN_WARNING "Read back:\n");
255         print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
256                        c->wbuf_verify, c->wbuf_pagesize, 0);
257
258         return -EIO;
259 }
260 #else
261 #define jffs2_verify_write(c,b,o) (0)
262 #endif
263
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Strategy: refile the failing eraseblock as bad, re-read any node data
 * that had already hit the flash, reserve space in a fresh block, write
 * the salvageable data there, then migrate the affected raw_node_refs
 * (and any in-core ->raw pointers) to the new block.  Losses along the
 * way degrade gracefully: nodes we cannot re-read are simply dropped. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
        struct jffs2_eraseblock *jeb, *new_jeb;
        struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
        size_t retlen;
        int ret;
        int nr_refile = 0;      /* number of refs to migrate to the new block */
        unsigned char *buf;     /* holds re-read flash data, or NULL if wbuf suffices */
        uint32_t start, end, ofs, len;

        /* The block containing the failed write. */
        jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

        spin_lock(&c->erase_completion_lock);
        if (c->wbuf_ofs % c->mtd->erasesize)
                jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
        else
                jffs2_block_refile(c, jeb, REFILE_ANYWAY);
        spin_unlock(&c->erase_completion_lock);

        /* block_refile() appended an obsolete ref covering the free space. */
        BUG_ON(!ref_obsolete(jeb->last_node));

        /* Find the first node to be recovered, by skipping over every
           node which ends before the wbuf starts, or which is obsolete. */
        for (next = raw = jeb->first_node; next; raw = next) {
                next = ref_next(raw);

                if (ref_obsolete(raw) || 
                    (next && ref_offset(next) <= c->wbuf_ofs)) {
                        dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
                                    ref_offset(raw), ref_flags(raw),
                                    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
                                    c->wbuf_ofs);
                        continue;
                }
                dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
                            ref_offset(raw), ref_flags(raw),
                            (ref_offset(raw) + ref_totlen(c, jeb, raw)));

                first_raw = raw;
                break;
        }

        if (!first_raw) {
                /* All nodes were obsolete. Nothing to recover. */
                D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
                c->wbuf_len = 0;
                return;
        }

        /* Recover the byte range [start, end): from the first affected
           node up to the obsolete free-space ref at the block's tail. */
        start = ref_offset(first_raw);
        end = ref_offset(jeb->last_node);
        nr_refile = 1;

        /* Count the number of refs which need to be copied */
        while ((raw = ref_next(raw)) != jeb->last_node)
                nr_refile++;

        dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
                    start, end, end - start, nr_refile);

        buf = NULL;
        if (start < c->wbuf_ofs) {
                /* First affected node was already partially written.
                 * Attempt to reread the old data into our buffer. */

                buf = kmalloc(end - start, GFP_KERNEL);
                if (!buf) {
                        printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

                        goto read_failed;
                }

                /* Do the read... */
                ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

                /* ECC recovered ? */
                if ((ret == -EUCLEAN || ret == -EBADMSG) &&
                    (retlen == c->wbuf_ofs - start))
                        ret = 0;

                if (ret || retlen != c->wbuf_ofs - start) {
                        printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

                        kfree(buf);
                        buf = NULL;
                read_failed:
                        /* Drop the partially-written node (and any obsolete
                           successors); only nodes fully inside the wbuf can
                           still be recovered. */
                        first_raw = ref_next(first_raw);
                        nr_refile--;
                        while (first_raw && ref_obsolete(first_raw)) {
                                first_raw = ref_next(first_raw);
                                nr_refile--;
                        }

                        /* If this was the only node to be recovered, give up */
                        if (!first_raw) {
                                c->wbuf_len = 0;
                                return;
                        }

                        /* It wasn't. Go on and try to recover nodes complete in the wbuf */
                        start = ref_offset(first_raw);
                        dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
                                    start, end, end - start, nr_refile);

                } else {
                        /* Read succeeded. Copy the remaining data from the wbuf */
                        memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
                }
        }
        /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
           Either 'buf' contains the data, or we find it in the wbuf */

        /* ... and get an allocation of space from a shiny new block instead */
        ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
        if (ret) {
                printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
                kfree(buf);
                return;
        }

        /* The summary is not recovered, so it must be disabled for this erase block */
        jffs2_sum_disable_collecting(c->summary);

        /* Pre-allocate all the raw_node_refs we will link below, so the
           migration loop cannot fail partway through. */
        ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
        if (ret) {
                printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
                kfree(buf);
                return;
        }

        ofs = write_ofs(c);

        if (end-start >= c->wbuf_pagesize) {
                /* Need to do another write immediately, but it's possible
                   that this is just because the wbuf itself is completely
                   full, and there's nothing earlier read back from the
                   flash. Hence 'buf' isn't necessarily what we're writing
                   from. */
                unsigned char *rewrite_buf = buf?:c->wbuf;
                uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

                D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
                          towrite, ofs));

#ifdef BREAKMEHEADER
                static int breakme;
                if (breakme++ == 20) {
                        printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
                        breakme = 0;
                        c->mtd->write(c->mtd, ofs, towrite, &retlen,
                                      brokenbuf);
                        ret = -EIO;
                } else
#endif
                        ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
                                            rewrite_buf);

                if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
                        /* Argh. We tried. Really we did. */
                        printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
                        kfree(buf);

                        if (retlen)
                                jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

                        return;
                }
                printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

                /* Whatever didn't fit in whole pages stays in the wbuf,
                   now positioned after the data we just wrote. */
                c->wbuf_len = (end - start) - towrite;
                c->wbuf_ofs = ofs + towrite;
                memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
                /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
        } else {
                /* OK, now we're left with the dregs in whichever buffer we're using */
                if (buf) {
                        memcpy(c->wbuf, buf, end-start);
                } else {
                        /* Regions overlap within the wbuf, hence memmove. */
                        memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
                }
                c->wbuf_ofs = ofs;
                c->wbuf_len = end - start;
        }

        /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
        new_jeb = &c->blocks[ofs / c->sector_size];

        spin_lock(&c->erase_completion_lock);
        for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
                uint32_t rawlen = ref_totlen(c, jeb, raw);
                struct jffs2_inode_cache *ic;
                struct jffs2_raw_node_ref *new_ref;
                struct jffs2_raw_node_ref **adjust_ref = NULL;
                struct jffs2_inode_info *f = NULL;

                D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
                          rawlen, ref_offset(raw), ref_flags(raw), ofs));

                ic = jffs2_raw_ref_to_ic(raw);

                /* Ick. This XATTR mess should be fixed shortly... */
                if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
                        struct jffs2_xattr_datum *xd = (void *)ic;
                        BUG_ON(xd->node != raw);
                        adjust_ref = &xd->node;
                        raw->next_in_ino = NULL;
                        ic = NULL;
                } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
                        struct jffs2_xattr_datum *xr = (void *)ic;
                        BUG_ON(xr->node != raw);
                        adjust_ref = &xr->node;
                        raw->next_in_ino = NULL;
                        ic = NULL;
                } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
                        struct jffs2_raw_node_ref **p = &ic->nodes;

                        /* Remove the old node from the per-inode list.
                           The list is terminated by a pointer back to
                           the inode_cache itself, hence the (void *)ic
                           comparison. */
                        while (*p && *p != (void *)ic) {
                                if (*p == raw) {
                                        (*p) = (raw->next_in_ino);
                                        raw->next_in_ino = NULL;
                                        break;
                                }
                                p = &((*p)->next_in_ino);
                        }

                        if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
                                /* If it's an in-core inode, then we have to adjust any
                                   full_dirent or full_dnode structure to point to the
                                   new version instead of the old */
                                f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
                                if (IS_ERR(f)) {
                                        /* Should never happen; it _must_ be present */
                                        JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
                                                    ic->ino, PTR_ERR(f));
                                        BUG();
                                }
                                /* We don't lock f->sem. There's a number of ways we could
                                   end up in here with it already being locked, and nobody's
                                   going to modify it on us anyway because we hold the
                                   alloc_sem. We're only changing one ->raw pointer too,
                                   which we can get away with without upsetting readers. */
                                adjust_ref = jffs2_incore_replace_raw(c, f, raw,
                                                                      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
                        } else if (unlikely(ic->state != INO_STATE_PRESENT &&
                                            ic->state != INO_STATE_CHECKEDABSENT &&
                                            ic->state != INO_STATE_GC)) {
                                JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
                                BUG();
                        }
                }

                /* Link the replacement ref into the new block at 'ofs',
                   preserving the original ref's flags. */
                new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

                if (adjust_ref) {
                        BUG_ON(*adjust_ref != raw);
                        *adjust_ref = new_ref;
                }
                if (f)
                        jffs2_gc_release_inode(c, f);

                /* Obsolete the old ref and move its space accounting
                   from used to dirty on the failing block. */
                if (!ref_obsolete(raw)) {
                        jeb->dirty_size += rawlen;
                        jeb->used_size  -= rawlen;
                        c->dirty_size += rawlen;
                        c->used_size -= rawlen;
                        raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
                        BUG_ON(raw->next_in_ino);
                }
                ofs += rawlen;
        }

        kfree(buf);

        /* Fix up the original jeb now it's on the bad_list */
        if (first_raw == jeb->first_node) {
                /* Every node was migrated: the block is effectively empty. */
                D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
                list_move(&jeb->list, &c->erase_pending_list);
                c->nr_erasing_blocks++;
                jffs2_garbage_collect_trigger(c);
        }

        jffs2_dbg_acct_sanity_check_nolock(c, jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

        jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
        jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

        spin_unlock(&c->erase_completion_lock);

        D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));

}
560
561 /* Meaning of pad argument:
562    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
563    1: Pad, do not adjust nextblock free_size
564    2: Pad, adjust nextblock free_size
565 */
566 #define NOPAD           0
567 #define PAD_NOACCOUNT   1
568 #define PAD_ACCOUNTING  2
569
/* Flush the write buffer to flash, optionally padding the final page.
 * 'pad' is NOPAD / PAD_NOACCOUNT / PAD_ACCOUNTING as documented above.
 * Returns 0 on success, -ENOMEM on ref-preallocation failure, or the
 * write/verify error after invoking jffs2_wbuf_recover().
 * Caller must hold c->alloc_sem (BUG()s otherwise); takes
 * c->erase_completion_lock internally for the accounting updates. */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
        struct jffs2_eraseblock *wbuf_jeb;
        int ret;
        size_t retlen;

        /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
           del_timer() the timer we never initialised. */
        if (!jffs2_is_writebuffered(c))
                return 0;

        if (!mutex_is_locked(&c->alloc_sem)) {
                printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
                BUG();
        }

        if (!c->wbuf_len)       /* already checked c->wbuf above */
                return 0;

        /* Make sure we have a spare ref for the padding node linked below. */
        wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
        if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
                return -ENOMEM;

        /* claim remaining space on the page
           this happens, if we have a change to a new block,
           or if fsync forces us to flush the writebuffer.
           if we have a switch to next page, we will not have
           enough remaining space for this.
        */
        if (pad ) {
                c->wbuf_len = PAD(c->wbuf_len);

                /* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
                   with 8 byte page size */
                memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

                /* If there is room, write a padding-node header so the
                   scan code can skip this region cleanly on next mount. */
                if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
                        struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
                        padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
                        padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
                        padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
                        padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
                }
        }
        /* else jffs2_flash_writev has actually filled in the rest of the
           buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
        static int breakme;
        if (breakme++ == 20) {
                printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
                breakme = 0;
                c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
                              brokenbuf);
                ret = -EIO;
        } else
#endif

                ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

        if (ret) {
                printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
                goto wfail;
        } else if (retlen != c->wbuf_pagesize) {
                printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
                       retlen, c->wbuf_pagesize);
                ret = -EIO;
                goto wfail;
        } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
        wfail:
                /* Any failure path attempts recovery onto a fresh block. */
                jffs2_wbuf_recover(c);

                return ret;
        }

        /* Adjust free size of the block if we padded. */
        if (pad) {
                uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

                D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
                          (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

                /* wbuf_pagesize - wbuf_len is the amount of space that's to be
                   padded. If there is less free space in the block than that,
                   something screwed up */
                if (wbuf_jeb->free_size < waste) {
                        printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
                               c->wbuf_ofs, c->wbuf_len, waste);
                        printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
                               wbuf_jeb->offset, wbuf_jeb->free_size);
                        BUG();
                }

                spin_lock(&c->erase_completion_lock);

                /* Record the padding as an obsolete ref, then reclassify
                   the space from dirty to wasted. */
                jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
                /* FIXME: that made it count as dirty. Convert to wasted */
                wbuf_jeb->dirty_size -= waste;
                c->dirty_size -= waste;
                wbuf_jeb->wasted_size += waste;
                c->wasted_size += waste;
        } else
                spin_lock(&c->erase_completion_lock);

        /* Stick any now-obsoleted blocks on the erase_pending_list */
        jffs2_refile_wbuf_blocks(c);
        jffs2_clear_wbuf_ino_list(c);
        spin_unlock(&c->erase_completion_lock);

        /* Reset the buffer to erased state and advance past the flushed page. */
        memset(c->wbuf,0xff,c->wbuf_pagesize);
        /* adjust write buffer offset, else we get a non contiguous write bug */
        c->wbuf_ofs += c->wbuf_pagesize;
        c->wbuf_len = 0;
        return 0;
}
685
686 /* Trigger garbage collection to flush the write-buffer.
687    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
688    outstanding. If ino arg non-zero, do it only if a write for the
689    given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
        uint32_t old_wbuf_ofs;
        uint32_t old_wbuf_len;
        int ret = 0;

        D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

        /* No write buffer allocated: nothing to flush. */
        if (!c->wbuf)
                return 0;

        mutex_lock(&c->alloc_sem);
        if (!jffs2_wbuf_pending_for_ino(c, ino)) {
                D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
                mutex_unlock(&c->alloc_sem);
                return 0;
        }

        /* Snapshot the wbuf position so we can tell when GC has moved it
           (i.e. when the pending data has been flushed as a side effect). */
        old_wbuf_ofs = c->wbuf_ofs;
        old_wbuf_len = c->wbuf_len;

        if (c->unchecked_size) {
                /* GC won't make any progress for a while */
                D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
                down_write(&c->wbuf_sem);
                ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                /* retry flushing wbuf in case jffs2_wbuf_recover
                   left some data in the wbuf */
                if (ret)
                        ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                up_write(&c->wbuf_sem);
        } else while (old_wbuf_len &&
                      old_wbuf_ofs == c->wbuf_ofs) {

                /* Drop alloc_sem across the GC pass: GC takes it itself. */
                mutex_unlock(&c->alloc_sem);

                D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

                ret = jffs2_garbage_collect_pass(c);
                if (ret) {
                        /* GC failed. Flush it with padding instead */
                        mutex_lock(&c->alloc_sem);
                        down_write(&c->wbuf_sem);
                        ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                        /* retry flushing wbuf in case jffs2_wbuf_recover
                           left some data in the wbuf */
                        if (ret)
                                ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
                        up_write(&c->wbuf_sem);
                        break;
                }
                mutex_lock(&c->alloc_sem);
        }

        D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

        mutex_unlock(&c->alloc_sem);
        return ret;
}
749
750 /* Pad write-buffer to end and write it, wasting space. */
751 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
752 {
753         int ret;
754
755         if (!c->wbuf)
756                 return 0;
757
758         down_write(&c->wbuf_sem);
759         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
760         /* retry - maybe wbuf recover left some data in wbuf. */
761         if (ret)
762                 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
763         up_write(&c->wbuf_sem);
764
765         return ret;
766 }
767
768 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
769                               size_t len)
770 {
771         if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
772                 return 0;
773
774         if (len > (c->wbuf_pagesize - c->wbuf_len))
775                 len = c->wbuf_pagesize - c->wbuf_len;
776         memcpy(c->wbuf + c->wbuf_len, buf, len);
777         c->wbuf_len += (uint32_t) len;
778         return len;
779 }
780
781 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
782                        unsigned long count, loff_t to, size_t *retlen,
783                        uint32_t ino)
784 {
785         struct jffs2_eraseblock *jeb;
786         size_t wbuf_retlen, donelen = 0;
787         uint32_t outvec_to = to;
788         int ret, invec;
789
790         /* If not writebuffered flash, don't bother */
791         if (!jffs2_is_writebuffered(c))
792                 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
793
794         down_write(&c->wbuf_sem);
795
796         /* If wbuf_ofs is not initialized, set it to target address */
797         if (c->wbuf_ofs == 0xFFFFFFFF) {
798                 c->wbuf_ofs = PAGE_DIV(to);
799                 c->wbuf_len = PAGE_MOD(to);
800                 memset(c->wbuf,0xff,c->wbuf_pagesize);
801         }
802
803         /*
804          * Sanity checks on target address.  It's permitted to write
805          * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
806          * write at the beginning of a new erase block. Anything else,
807          * and you die.  New block starts at xxx000c (0-b = block
808          * header)
809          */
810         if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
811                 /* It's a write to a new block */
812                 if (c->wbuf_len) {
813                         D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
814                                   "causes flush of wbuf at 0x%08x\n",
815                                   (unsigned long)to, c->wbuf_ofs));
816                         ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
817                         if (ret)
818                                 goto outerr;
819                 }
820                 /* set pointer to new block */
821                 c->wbuf_ofs = PAGE_DIV(to);
822                 c->wbuf_len = PAGE_MOD(to);
823         }
824
825         if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
826                 /* We're not writing immediately after the writebuffer. Bad. */
827                 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
828                        "to %08lx\n", (unsigned long)to);
829                 if (c->wbuf_len)
830                         printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
831                                c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
832                 BUG();
833         }
834
835         /* adjust alignment offset */
836         if (c->wbuf_len != PAGE_MOD(to)) {
837                 c->wbuf_len = PAGE_MOD(to);
838                 /* take care of alignment to next page */
839                 if (!c->wbuf_len) {
840                         c->wbuf_len = c->wbuf_pagesize;
841                         ret = __jffs2_flush_wbuf(c, NOPAD);
842                         if (ret)
843                                 goto outerr;
844                 }
845         }
846
847         for (invec = 0; invec < count; invec++) {
848                 int vlen = invecs[invec].iov_len;
849                 uint8_t *v = invecs[invec].iov_base;
850
851                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
852
853                 if (c->wbuf_len == c->wbuf_pagesize) {
854                         ret = __jffs2_flush_wbuf(c, NOPAD);
855                         if (ret)
856                                 goto outerr;
857                 }
858                 vlen -= wbuf_retlen;
859                 outvec_to += wbuf_retlen;
860                 donelen += wbuf_retlen;
861                 v += wbuf_retlen;
862
863                 if (vlen >= c->wbuf_pagesize) {
864                         ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
865                                             &wbuf_retlen, v);
866                         if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
867                                 goto outfile;
868
869                         vlen -= wbuf_retlen;
870                         outvec_to += wbuf_retlen;
871                         c->wbuf_ofs = outvec_to;
872                         donelen += wbuf_retlen;
873                         v += wbuf_retlen;
874                 }
875
876                 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
877                 if (c->wbuf_len == c->wbuf_pagesize) {
878                         ret = __jffs2_flush_wbuf(c, NOPAD);
879                         if (ret)
880                                 goto outerr;
881                 }
882
883                 outvec_to += wbuf_retlen;
884                 donelen += wbuf_retlen;
885         }
886
887         /*
888          * If there's a remainder in the wbuf and it's a non-GC write,
889          * remember that the wbuf affects this ino
890          */
891         *retlen = donelen;
892
893         if (jffs2_sum_active()) {
894                 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
895                 if (res)
896                         return res;
897         }
898
899         if (c->wbuf_len && ino)
900                 jffs2_wbuf_dirties_inode(c, ino);
901
902         ret = 0;
903         up_write(&c->wbuf_sem);
904         return ret;
905
906 outfile:
907         /*
908          * At this point we have no problem, c->wbuf is empty. However
909          * refile nextblock to avoid writing again to same address.
910          */
911
912         spin_lock(&c->erase_completion_lock);
913
914         jeb = &c->blocks[outvec_to / c->sector_size];
915         jffs2_block_refile(c, jeb, REFILE_ANYWAY);
916
917         spin_unlock(&c->erase_completion_lock);
918
919 outerr:
920         *retlen = 0;
921         up_write(&c->wbuf_sem);
922         return ret;
923 }
924
925 /*
926  *      This is the entry for flash write.
927  *      Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
928 */
929 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
930                       size_t *retlen, const u_char *buf)
931 {
932         struct kvec vecs[1];
933
934         if (!jffs2_is_writebuffered(c))
935                 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
936
937         vecs[0].iov_base = (unsigned char *) buf;
938         vecs[0].iov_len = len;
939         return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
940 }
941
/*
 * Handle readback from the write-buffer and ECC failure returns.
 *
 * Reads from flash, then overlays any bytes still pending in the wbuf
 * so callers see data they have "written" but which hasn't reached the
 * medium yet.  A full-length read that reports an ECC error is turned
 * into success: the node CRC checks downstream will catch genuinely
 * corrupt data (see the long comment below).
 */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	/* Not write-buffered: pass straight through to the device */
	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
			       " returned ECC error\n", len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node.  If data are corrupted node check will sort
		 * it out.  We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance.  Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* Overlay the wbuf contents over the region just read from flash.
	 * Two cases: the read starts inside the wbuf's range, or the read
	 * starts before it and the wbuf covers the tail of the read. */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
1004
/* Number of pages' worth of OOB data buffered in c->oobbuf for scans */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* Canonical cleanmarker node image written to / compared against the
 * OOB area of freshly-erased blocks (totlen matches OOB_CM_SIZE). */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
1016
1017 /*
1018  * Check, if the out of band area is empty. This function knows about the clean
1019  * marker and if it is present in OOB, treats the OOB as empty anyway.
1020  */
1021 int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1022                           struct jffs2_eraseblock *jeb, int mode)
1023 {
1024         int i, ret;
1025         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1026         struct mtd_oob_ops ops;
1027
1028         ops.mode = MTD_OPS_AUTO_OOB;
1029         ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
1030         ops.oobbuf = c->oobbuf;
1031         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1032         ops.datbuf = NULL;
1033
1034         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1035         if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
1036                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1037                                 " bytes, read %zd bytes, error %d\n",
1038                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1039                 if (!ret || mtd_is_bitflip(ret))
1040                         ret = -EIO;
1041                 return ret;
1042         }
1043
1044         for(i = 0; i < ops.ooblen; i++) {
1045                 if (mode && i < cmlen)
1046                         /* Yeah, we know about the cleanmarker */
1047                         continue;
1048
1049                 if (ops.oobbuf[i] != 0xFF) {
1050                         D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
1051                                   "%08x\n", ops.oobbuf[i], i, jeb->offset));
1052                         return 1;
1053                 }
1054         }
1055
1056         return 0;
1057 }
1058
1059 /*
1060  * Check for a valid cleanmarker.
1061  * Returns: 0 if a valid cleanmarker was found
1062  *          1 if no cleanmarker was found
1063  *          negative error code if an error occurred
1064  */
1065 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1066                                  struct jffs2_eraseblock *jeb)
1067 {
1068         struct mtd_oob_ops ops;
1069         int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1070
1071         ops.mode = MTD_OPS_AUTO_OOB;
1072         ops.ooblen = cmlen;
1073         ops.oobbuf = c->oobbuf;
1074         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1075         ops.datbuf = NULL;
1076
1077         ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1078         if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
1079                 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1080                                 " bytes, read %zd bytes, error %d\n",
1081                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1082                 if (!ret || mtd_is_bitflip(ret))
1083                         ret = -EIO;
1084                 return ret;
1085         }
1086
1087         return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1088 }
1089
1090 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1091                                  struct jffs2_eraseblock *jeb)
1092 {
1093         int ret;
1094         struct mtd_oob_ops ops;
1095         int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1096
1097         ops.mode = MTD_OPS_AUTO_OOB;
1098         ops.ooblen = cmlen;
1099         ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1100         ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1101         ops.datbuf = NULL;
1102
1103         ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
1104         if (ret || ops.oobretlen != ops.ooblen) {
1105                 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
1106                                 " bytes, read %zd bytes, error %d\n",
1107                                 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1108                 if (!ret)
1109                         ret = -EIO;
1110                 return ret;
1111         }
1112
1113         return 0;
1114 }
1115
1116 /*
1117  * On NAND we try to mark this block bad. If the block was erased more
1118  * than MAX_ERASE_FAILURES we mark it finally bad.
1119  * Don't care about failures. This block remains on the erase-pending
1120  * or badblock list as long as nobody manipulates the flash with
1121  * a bootloader or something like that.
1122  */
1123
1124 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1125 {
1126         int     ret;
1127
1128         /* if the count is < max, we try to write the counter to the 2nd page oob area */
1129         if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1130                 return 0;
1131
1132         if (!c->mtd->block_markbad)
1133                 return 1; // What else can we do?
1134
1135         printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset);
1136         ret = c->mtd->block_markbad(c->mtd, bad_offset);
1137
1138         if (ret) {
1139                 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1140                 return ret;
1141         }
1142         return 1;
1143 }
1144
1145 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1146 {
1147         struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1148
1149         if (!c->mtd->oobsize)
1150                 return 0;
1151
1152         /* Cleanmarker is out-of-band, so inline size zero */
1153         c->cleanmarker_size = 0;
1154
1155         if (!oinfo || oinfo->oobavail == 0) {
1156                 printk(KERN_ERR "inconsistent device description\n");
1157                 return -EINVAL;
1158         }
1159
1160         D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
1161
1162         c->oobavail = oinfo->oobavail;
1163
1164         /* Initialise write buffer */
1165         init_rwsem(&c->wbuf_sem);
1166         c->wbuf_pagesize = c->mtd->writesize;
1167         c->wbuf_ofs = 0xFFFFFFFF;
1168
1169         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1170         if (!c->wbuf)
1171                 return -ENOMEM;
1172
1173         c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1174         if (!c->oobbuf) {
1175                 kfree(c->wbuf);
1176                 return -ENOMEM;
1177         }
1178
1179 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1180         c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1181         if (!c->wbuf_verify) {
1182                 kfree(c->oobbuf);
1183                 kfree(c->wbuf);
1184                 return -ENOMEM;
1185         }
1186 #endif
1187         return 0;
1188 }
1189
1190 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1191 {
1192 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1193         kfree(c->wbuf_verify);
1194 #endif
1195         kfree(c->wbuf);
1196         kfree(c->oobbuf);
1197 }
1198
1199 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1200         c->cleanmarker_size = 0;                /* No cleanmarkers needed */
1201
1202         /* Initialize write buffer */
1203         init_rwsem(&c->wbuf_sem);
1204
1205
1206         c->wbuf_pagesize =  c->mtd->erasesize;
1207
1208         /* Find a suitable c->sector_size
1209          * - Not too much sectors
1210          * - Sectors have to be at least 4 K + some bytes
1211          * - All known dataflashes have erase sizes of 528 or 1056
1212          * - we take at least 8 eraseblocks and want to have at least 8K size
1213          * - The concatenation should be a power of 2
1214         */
1215
1216         c->sector_size = 8 * c->mtd->erasesize;
1217
1218         while (c->sector_size < 8192) {
1219                 c->sector_size *= 2;
1220         }
1221
1222         /* It may be necessary to adjust the flash size */
1223         c->flash_size = c->mtd->size;
1224
1225         if ((c->flash_size % c->sector_size) != 0) {
1226                 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1227                 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1228         };
1229
1230         c->wbuf_ofs = 0xFFFFFFFF;
1231         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1232         if (!c->wbuf)
1233                 return -ENOMEM;
1234
1235 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1236         c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1237         if (!c->wbuf_verify) {
1238                 kfree(c->oobbuf);
1239                 kfree(c->wbuf);
1240                 return -ENOMEM;
1241         }
1242 #endif
1243
1244         printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1245
1246         return 0;
1247 }
1248
1249 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1250 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1251         kfree(c->wbuf_verify);
1252 #endif
1253         kfree(c->wbuf);
1254 }
1255
1256 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1257         /* Cleanmarker currently occupies whole programming regions,
1258          * either one or 2 for 8Byte STMicro flashes. */
1259         c->cleanmarker_size = max(16u, c->mtd->writesize);
1260
1261         /* Initialize write buffer */
1262         init_rwsem(&c->wbuf_sem);
1263         c->wbuf_pagesize = c->mtd->writesize;
1264         c->wbuf_ofs = 0xFFFFFFFF;
1265
1266         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1267         if (!c->wbuf)
1268                 return -ENOMEM;
1269
1270 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1271         c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1272         if (!c->wbuf_verify) {
1273                 kfree(c->wbuf);
1274                 return -ENOMEM;
1275         }
1276 #endif
1277         return 0;
1278 }
1279
1280 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1281 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1282         kfree(c->wbuf_verify);
1283 #endif
1284         kfree(c->wbuf);
1285 }
1286
1287 int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1288         c->cleanmarker_size = 0;
1289
1290         if (c->mtd->writesize == 1)
1291                 /* We do not need write-buffer */
1292                 return 0;
1293
1294         init_rwsem(&c->wbuf_sem);
1295
1296         c->wbuf_pagesize =  c->mtd->writesize;
1297         c->wbuf_ofs = 0xFFFFFFFF;
1298         c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1299         if (!c->wbuf)
1300                 return -ENOMEM;
1301
1302         printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1303
1304         return 0;
1305 }
1306
1307 void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
1308         kfree(c->wbuf);
1309 }