[PATCH] Make address_space_operations->invalidatepage return void
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <asm/atomic.h>

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * I/O completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Ordered,     /* ordered write */
        BH_Eopnotsupp,  /* operation not supported (barrier) */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Keep related fields in common cachelines.  The most commonly accessed
 * field (b_state) goes at the start so the compiler does not generate
 * indexed addressing for it.
 */
struct buffer_head {
        /* First cache line: */
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */
        atomic_t b_count;               /* users using this block */
        u32 b_size;                     /* block size */

        sector_t b_blocknr;             /* block number */
        char *b_data;                   /* pointer to data block */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)            \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)          \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)           \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)        \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)      \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

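/*
 * For illustration only (expository comment, not part of the API):
 * BUFFER_FNS(Dirty, dirty) expands to set_buffer_dirty(),
 * clear_buffer_dirty() and buffer_dirty(); TAS_BUFFER_FNS(Dirty, dirty)
 * adds the atomic test-and-modify pair, e.g.:
 *
 *        static inline int test_set_buffer_dirty(struct buffer_head *bh)
 *        {
 *                return test_and_set_bit(BH_Dirty, &(bh)->b_state);
 *        }
 */
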
/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Ordered, ordered)
BUFFER_FNS(Eopnotsupp, eopnotsupp)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)        mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)
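
/*
 * Illustrative sketch (not a declaration in this header): a page's buffers
 * form a circular list linked through b_this_page, so the typical walk over
 * every buffer of a page looks like:
 *
 *        struct buffer_head *head = page_buffers(page);
 *        struct buffer_head *bh = head;
 *
 *        do {
 *                if (buffer_dirty(bh))
 *                        ...
 *                bh = bh->b_this_page;
 *        } while (bh != head);
 */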

/*
 * Declarations
 */

void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
void thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *, sector_t, int);
struct buffer_head *__getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void FASTCALL(__lock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask);
void block_invalidatepage(struct page *page, unsigned long offset);
void do_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int cont_prepare_write(struct page *, unsigned, unsigned, get_block_t *,
                                loff_t *);
int generic_cont_expand(struct inode *inode, loff_t size);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
int nobh_truncate_page(struct address_space *, loff_t);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
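
/*
 * Example (hypothetical "foofs", sketch only): a buffer_head-backed
 * filesystem typically wires these generic helpers into its
 * address_space_operations through small wrappers that supply its own
 * get_block routine.  Note that ->invalidatepage returns void, matching
 * block_invalidatepage() above:
 *
 *        static int foofs_readpage(struct file *file, struct page *page)
 *        {
 *                return block_read_full_page(page, foofs_get_block);
 *        }
 *
 *        static struct address_space_operations foofs_aops = {
 *                .readpage       = foofs_readpage,
 *                .sync_page      = block_sync_page,
 *                .invalidatepage = block_invalidatepage,
 *        };
 */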


/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
{
        page_cache_get(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread(sb->s_bdev, block, sb->s_blocksize);
}
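
/*
 * Illustrative usage ("raw" and "offset" here are made-up names): read
 * on-disk metadata with sb_bread() and drop the reference when done:
 *
 *        struct buffer_head *bh = sb_bread(sb, block);
 *
 *        if (!bh)
 *                return -EIO;
 *        memcpy(&raw, bh->b_data + offset, sizeof(raw));
 *        brelse(bh);
 */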

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
}
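
/*
 * Example (hypothetical get_block implementation, sketch only): a trivial
 * filesystem whose file blocks sit contiguously from a per-inode start
 * block could implement get_block_t with map_bh() like this
 * (FOOFS_I()->i_start_block is made up):
 *
 *        static int foofs_get_block(struct inode *inode, sector_t iblock,
 *                                   struct buffer_head *bh_result, int create)
 *        {
 *                map_bh(bh_result, inode->i_sb,
 *                       FOOFS_I(inode)->i_start_block + iblock);
 *                return 0;
 *        }
 */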

/*
 * Calling wait_on_buffer() on a zero-ref buffer is illegal, so in that case
 * we call into __wait_on_buffer() purely to trip its debug check.  The check
 * lives out of line because debug code in inline functions is bloaty.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
                __wait_on_buffer(bh);
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (test_set_buffer_locked(bh))
                __lock_buffer(bh);
}

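/*
 * Illustrative sketch, similar in spirit to what sync_dirty_buffer() does:
 * synchronously write out one dirty buffer by hand (WRITE is the usual rw
 * flag taken by submit_bh()):
 *
 *        lock_buffer(bh);
 *        if (test_clear_buffer_dirty(bh)) {
 *                get_bh(bh);
 *                bh->b_end_io = end_buffer_write_sync;
 *                submit_bh(WRITE, bh);
 *                wait_on_buffer(bh);
 *        } else {
 *                unlock_buffer(bh);
 *        }
 */
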
#endif /* _LINUX_BUFFER_HEAD_H */