2 * linux/fs/ufs/ufs_dir.c
5 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
6 * Laboratory for Computer Science Research Computing Facility
7 * Rutgers, The State University of New Jersey
9 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
15 * Migration to usage of "page cache" on May 2006 by
16 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
19 #include <linux/time.h>
21 #include <linux/ufs_fs.h>
22 #include <linux/smp_lock.h>
23 #include <linux/sched.h>
/*
 * Debug logging helper: prints file/line/function context, then the
 * caller-supplied printk() arguments (invoked as UFSD(("fmt", ...))).
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original two-statement expansion silently broke
 * unbraced callers such as `if (cond) UFSD(("x")); else ...`.
 */
#define UFSD(x) do { \
	printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); \
	printk x; \
} while (0)
37 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
39 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
/*
 * Compare a wanted name against an on-disk directory entry.
 * Returns 1 on match, 0 on mismatch (opposite sense to strncmp).
 * Caller guarantees len <= UFS_MAXNAMLEN and de != NULL.
 * NOTE(review): interior lines of this function are elided in this view.
 */
41 static inline int ufs_match(struct super_block *sb, int len,
42 const char * const name, struct ufs_dir_entry * de)
/* Cheap rejection first: stored name length must equal the wanted length. */
44 if (len != ufs_get_de_namlen(sb, de))
/* Lengths equal: entries match iff the name bytes are identical. */
48 return !memcmp(name, de->d_name, len);
/*
 * Commit a modified byte range [from, to) of a directory page:
 * run the address_space commit_write op, then write the page out
 * synchronously (write_one_page with wait == 1).
 * NOTE(review): interior lines (err declaration, inode-size handling,
 * return) are elided in this view.
 */
51 static int ufs_commit_chunk(struct page *page, unsigned from, unsigned to)
53 struct inode *dir = page->mapping->host;
56 page->mapping->a_ops->commit_write(NULL, page, from, to);
/* Directories are written through immediately, not left dirty in cache. */
58 err = write_one_page(page, 1);
/*
 * Drop the page reference taken by ufs_get_page().
 * NOTE(review): a kunmap() presumably precedes this in the elided line —
 * confirm against the full source.
 */
64 static inline void ufs_put_page(struct page *page)
67 page_cache_release(page);
/*
 * Number of page-cache pages occupied by the directory:
 * i_size rounded up to a whole PAGE_CACHE_SIZE multiple.
 */
70 static inline unsigned long ufs_dir_pages(struct inode *inode)
72 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
/*
 * Look up dentry's name in directory 'dir' and return the inode number
 * it refers to, converted from on-disk (fs-endian) to CPU byte order.
 * NOTE(review): the not-found path and page release are elided here;
 * presumably 0 is returned when no entry matches — confirm.
 */
75 ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
78 struct ufs_dir_entry *de;
81 de = ufs_find_entry(dir, dentry, &page);
83 res = fs32_to_cpu(dir->i_sb, de->d_ino);
/*
 * Repoint an existing directory entry 'de' (living inside 'page') at
 * 'inode': rewrite its d_ino and type in fs byte order, commit the
 * covering byte range, and update the directory's m/ctime.
 * Used by rename. Releases the page.
 * NOTE(review): error handling for prepare_write/commit (both 'err'
 * assignments) and the page release are in lines elided from this view.
 */
90 /* Releases the page */
91 void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
92 struct page *page, struct inode *inode)
/* Byte range of the entry within the page: [from, from + reclen). */
94 unsigned from = (char *) de - (char *) page_address(page);
95 unsigned to = from + fs16_to_cpu(dir->i_sb, de->d_reclen);
99 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* On-disk fields are stored in the filesystem's byte order. */
101 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
102 ufs_set_de_type(dir->i_sb, de, inode->i_mode);
103 err = ufs_commit_chunk(page, from, to);
105 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
106 mark_inode_dirty(dir);
/*
 * Validate every directory entry in a page read from disk, so later
 * walkers (readdir, lookup) can trust reclen/namlen without rechecking.
 * On success the page is marked PageChecked; on failure ufs_error() is
 * reported for the specific defect. Error labels and goto targets are
 * elided from this view.
 */
110 static void ufs_check_page(struct page *page)
112 struct inode *dir = page->mapping->host;
113 struct super_block *sb = dir->i_sb;
114 char *kaddr = page_address(page);
115 unsigned offs, rec_len;
116 unsigned limit = PAGE_CACHE_SIZE;
117 struct ufs_dir_entry *p;
/* The last page of the directory may be only partially used. */
120 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
121 limit = dir->i_size & ~PAGE_CACHE_MASK;
/* A valid directory size is a multiple of the chunk size. */
122 if (limit & (UFS_SECTOR_SIZE - 1))
/* Walk entries; each check below pairs with an error string further down. */
127 for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
128 p = (struct ufs_dir_entry *)(kaddr + offs);
129 rec_len = fs16_to_cpu(sb, p->d_reclen);
/* reclen must at least hold an entry with a one-character name... */
131 if (rec_len < UFS_DIR_REC_LEN(1))
/* ...and must be large enough for this entry's actual name length. */
135 if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
/* Entry must not straddle a UFS_SECTOR_SIZE chunk boundary. */
137 if (((offs + rec_len - 1) ^ offs) & ~(UFS_SECTOR_SIZE-1))
/* Inode number must fall within inodes-per-group * number-of-groups. */
139 if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
140 UFS_SB(sb)->s_uspi->s_ncg))
146 SetPageChecked(page);
149 /* Too bad, we had an error */
152 ufs_error(sb, "ufs_check_page",
153 "size of directory #%lu is not a multiple of chunk size",
158 error = "rec_len is smaller than minimal";
161 error = "unaligned directory entry";
164 error = "rec_len is too small for name_len";
167 error = "directory entry across blocks";
170 error = "inode out of bounds";
172 ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
173 "offset=%lu, rec_len=%d, name_len=%d",
174 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
175 rec_len, ufs_get_de_namlen(sb, p));
178 p = (struct ufs_dir_entry *)(kaddr + offs);
/*
 * NOTE(review): the function-name string below says "ext2_check_page" —
 * a copy/paste leftover from ext2; it should read "ufs_check_page".
 */
179 ufs_error (sb, "ext2_check_page",
180 "entry in directory #%lu spans the page boundary"
182 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
/* Page is marked checked even on error so we don't re-report it forever. */
184 SetPageChecked(page);
/*
 * Read directory page 'n' through the page cache and validate it once
 * via ufs_check_page(). Returns the page, or ERR_PTR(-EIO) on failure
 * (the success-return and error-cleanup lines are elided in this view).
 * Callers pair this with ufs_put_page().
 */
188 static struct page *ufs_get_page(struct inode *dir, unsigned long n)
190 struct address_space *mapping = dir->i_mapping;
191 struct page *page = read_cache_page(mapping, n,
192 (filler_t*)mapping->a_ops->readpage, NULL);
/* read_cache_page may return a still-locked page under I/O; wait for it. */
194 wait_on_page_locked(page);
196 if (!PageUptodate(page))
/* Only validate each cached page once; PageChecked persists afterwards. */
198 if (!PageChecked(page))
199 ufs_check_page(page);
207 return ERR_PTR(-EIO);
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one: a full PAGE_CACHE_SIZE for interior
 * pages, the i_size remainder for the final page.
 */
215 ufs_last_byte(struct inode *inode, unsigned long page_nr)
217 unsigned last_byte = inode->i_size;
218 
219 last_byte -= page_nr << PAGE_CACHE_SHIFT;
220 if (last_byte > PAGE_CACHE_SIZE)
221 last_byte = PAGE_CACHE_SIZE;
/*
 * Step to the next directory entry by advancing over this entry's
 * on-disk record length (d_reclen, converted from fs byte order).
 */
225 static inline struct ufs_dir_entry *
226 ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
228 return (struct ufs_dir_entry *)((char *)p +
229 fs16_to_cpu(sb, p->d_reclen));
/*
 * Return the ".." entry of 'dir': it is the second entry of the first
 * directory page, i.e. the one immediately after ".". On success *p is
 * set to the page holding the entry (caller releases it); the error
 * path for ufs_get_page() failure is elided in this view.
 */
232 struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
234 struct page *page = ufs_get_page(dir, 0);
235 struct ufs_dir_entry *de = NULL;
/* Skip "." (the first entry) to land on "..". */
238 de = ufs_next_entry(dir->i_sb,
239 (struct ufs_dir_entry *)page_address(page));
/*
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 * Scans page by page from 'start', wrapping around until 'n' returns
 * to 'start' (the per-inode start-of-lookup cache is commented out).
 * NOTE(review): the found/not-found return paths are elided here.
 */
253 struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
254 struct page **res_page)
256 struct super_block *sb = dir->i_sb;
257 const char *name = dentry->d_name.name;
258 int namelen = dentry->d_name.len;
/* Smallest on-disk record that could hold a name of this length. */
259 unsigned reclen = UFS_DIR_REC_LEN(namelen);
260 unsigned long start, n;
261 unsigned long npages = ufs_dir_pages(dir);
262 struct page *page = NULL;
263 struct ufs_dir_entry *de;
265 UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen));
/* Empty directory or over-long name can never match. */
267 if (npages == 0 || namelen > UFS_MAXNAMLEN)
273 /* start = ei->i_dir_start_lookup; */
280 page = ufs_get_page(dir, n);
282 kaddr = page_address(page);
283 de = (struct ufs_dir_entry *) kaddr;
/* Last position in this page where a matching-size entry could begin. */
284 kaddr += ufs_last_byte(dir, n) - reclen;
285 while ((char *) de <= kaddr) {
/* d_reclen == 0 would loop forever; report corruption instead. */
286 if (de->d_reclen == 0) {
287 ufs_error(dir->i_sb, __FUNCTION__,
288 "zero-length directory entry");
292 if (ufs_match(sb, namelen, name, de))
294 de = ufs_next_entry(sb, de);
/* Wrap around the directory until we return to the starting page. */
300 } while (n != start);
306 /* ei->i_dir_start_lookup = n; */
/*
 * Add an entry for 'dentry' pointing at 'inode' into the parent
 * directory. Scans for either an unused entry large enough, or a live
 * entry with enough slack after its name, splitting the latter in two.
 * Iterates one page past npages so the directory can be extended with
 * a fresh chunk. Returns 0 on success (error paths and several goto
 * targets are elided from this view).
 */
313 int ufs_add_link(struct dentry *dentry, struct inode *inode)
315 struct inode *dir = dentry->d_parent->d_inode;
316 const char *name = dentry->d_name.name;
317 int namelen = dentry->d_name.len;
318 struct super_block *sb = dir->i_sb;
/* Minimal record size needed to store the new name. */
319 unsigned reclen = UFS_DIR_REC_LEN(namelen);
320 unsigned short rec_len, name_len;
321 struct page *page = NULL;
322 struct ufs_dir_entry *de;
323 unsigned long npages = ufs_dir_pages(dir);
329 UFSD(("ENTER, name %s, namelen %u\n", name, namelen));
332 * We take care of directory expansion in the same loop.
333 * This code plays outside i_size, so it locks the page
334 * to protect that region.
/* <= npages: the extra iteration appends a new chunk beyond i_size. */
336 for (n = 0; n <= npages; n++) {
339 page = ufs_get_page(dir, n);
344 kaddr = page_address(page);
345 dir_end = kaddr + ufs_last_byte(dir, n);
346 de = (struct ufs_dir_entry *)kaddr;
347 kaddr += PAGE_CACHE_SIZE - reclen;
348 while ((char *)de <= kaddr) {
/* Hit current EOF: initialize a brand-new chunk-sized entry here. */
349 if ((char *)de == dir_end) {
352 rec_len = UFS_SECTOR_SIZE;
353 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
/* d_reclen == 0 would loop forever; report corruption instead. */
357 if (de->d_reclen == 0) {
358 ufs_error(dir->i_sb, __FUNCTION__,
359 "zero-length directory entry");
/* Name already present: fail (presumably -EEXIST; path elided). */
364 if (ufs_match(sb, namelen, name, de))
/* Space this entry's own name actually needs, rounded per on-disk rules. */
366 name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
367 rec_len = fs16_to_cpu(sb, de->d_reclen);
/* Case 1: a deleted (ino == 0) entry big enough — reuse it whole. */
368 if (!de->d_ino && rec_len >= reclen)
/* Case 2: a live entry with enough slack after its name — split it. */
370 if (rec_len >= name_len + reclen)
372 de = (struct ufs_dir_entry *) ((char *) de + rec_len);
/* Found a slot: prepare the byte range and write the new entry. */
381 from = (char*)de - (char*)page_address(page);
383 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* Splitting a live entry: carve the tail slack into a new record. */
387 struct ufs_dir_entry *de1 =
388 (struct ufs_dir_entry *) ((char *) de + name_len);
389 de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
390 de->d_reclen = cpu_to_fs16(sb, name_len);
395 ufs_set_de_namlen(sb, de, namelen);
/* namelen + 1 copies the trailing NUL along with the name bytes. */
396 memcpy(de->d_name, name, namelen + 1);
397 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
398 ufs_set_de_type(sb, de, inode->i_mode);
400 err = ufs_commit_chunk(page, from, to);
401 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
403 mark_inode_dirty(dir);
/*
 * Re-derive a safe readdir offset after the directory may have changed:
 * starting from the chunk boundary at (offset & mask), walk forward
 * entry by entry until reaching (or passing) the old offset, and return
 * the resulting entry-aligned offset into the page.
 * NOTE(review): the body of the zero-reclen check (presumably a break)
 * is elided in this view.
 */
414 static inline unsigned
415 ufs_validate_entry(struct super_block *sb, char *base,
416 unsigned offset, unsigned mask)
418 struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
419 struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
420 while ((char*)p < (char*)de) {
/* Corrupt entry: stop walking rather than loop forever. */
421 if (p->d_reclen == 0)
423 p = ufs_next_entry(sb, p);
425 return (char *)p - base;
/*
 * This is blatantly stolen from ext2fs
 * readdir: iterate the directory's pages, emitting each live entry via
 * the filldir callback. f_pos encodes (page index << PAGE_CACHE_SHIFT)
 * | in-page offset. If the file version no longer matches the inode
 * version, the offset is revalidated to an entry boundary first.
 * Several lines (return paths, kaddr/limit declarations, page release)
 * are elided from this view.
 */
433 ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
435 loff_t pos = filp->f_pos;
436 struct inode *inode = filp->f_dentry->d_inode;
437 struct super_block *sb = inode->i_sb;
/* Split f_pos into page index 'n' and in-page byte offset. */
438 unsigned int offset = pos & ~PAGE_CACHE_MASK;
439 unsigned long n = pos >> PAGE_CACHE_SHIFT;
440 unsigned long npages = ufs_dir_pages(inode);
441 unsigned chunk_mask = ~(UFS_SECTOR_SIZE - 1);
/* Directory changed since this file last read it? Then revalidate pos. */
442 int need_revalidate = filp->f_version != inode->i_version;
443 unsigned flags = UFS_SB(sb)->s_flags;
/* Past the last spot a minimal entry could start: nothing left to emit. */
447 if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
450 for ( ; n < npages; n++, offset = 0) {
452 struct ufs_dir_entry *de;
454 struct page *page = ufs_get_page(inode, n);
457 ufs_error(sb, __FUNCTION__,
/* Skip an unreadable page but keep f_pos moving forward. */
460 filp->f_pos += PAGE_CACHE_SIZE - offset;
463 kaddr = page_address(page);
464 if (unlikely(need_revalidate)) {
/* Snap offset back to a real entry boundary within its chunk. */
466 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
467 filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
469 filp->f_version = inode->i_version;
472 de = (struct ufs_dir_entry *)(kaddr+offset);
473 limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
474 for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
/* d_reclen == 0 would loop forever; report corruption instead. */
475 if (de->d_reclen == 0) {
476 ufs_error(sb, __FUNCTION__,
477 "zero-length directory entry");
483 unsigned char d_type = DT_UNKNOWN;
485 offset = (char *)de - kaddr;
487 UFSD(("filldir(%s,%u)\n", de->d_name,
488 fs32_to_cpu(sb, de->d_ino)));
489 UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de)));
/* Only the 4.4BSD on-disk format records a file type in the entry. */
491 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
492 d_type = de->d_u.d_44.d_type;
494 over = filldir(dirent, de->d_name,
495 ufs_get_de_namlen(sb, de),
496 (n<<PAGE_CACHE_SHIFT) | offset,
497 fs32_to_cpu(sb, de->d_ino), d_type);
503 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry in the same chunk: the predecessor's d_reclen is
 * grown to swallow the victim's record. If the victim is first in its
 * chunk there is no predecessor (pde stays NULL; that branch is elided
 * in this view, presumably zeroing d_ino instead — confirm).
 */
515 int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
518 struct super_block *sb = inode->i_sb;
519 struct address_space *mapping = page->mapping;
520 char *kaddr = page_address(page);
/* Round down to the start of the chunk containing the victim entry. */
521 unsigned from = ((char*)dir - kaddr) & ~(UFS_SECTOR_SIZE - 1);
522 unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
523 struct ufs_dir_entry *pde = NULL;
524 struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
529 UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
530 fs32_to_cpu(sb, de->d_ino),
531 fs16_to_cpu(sb, de->d_reclen),
532 ufs_get_de_namlen(sb, de), de->d_name));
/* Walk the chunk to find the entry immediately preceding the victim. */
534 while ((char*)de < (char*)dir) {
535 if (de->d_reclen == 0) {
536 ufs_error(inode->i_sb, __FUNCTION__,
537 "zero-length directory entry");
542 de = ufs_next_entry(sb, de);
545 from = (char*)pde - (char*)page_address(page);
547 err = mapping->a_ops->prepare_write(NULL, page, from, to);
/* Extend the predecessor to cover the deleted record. */
550 pde->d_reclen = cpu_to_fs16(sb, to-from);
552 err = ufs_commit_chunk(page, from, to);
553 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
554 mark_inode_dirty(inode);
/*
 * Initialize a freshly created directory 'inode' (child of 'dir') with
 * its first chunk containing the "." and ".." entries, then commit it.
 * Returns 0 on success (failure paths for grab_cache_page and
 * prepare_write are elided in this view).
 */
561 int ufs_make_empty(struct inode * inode, struct inode *dir)
563 struct super_block * sb = dir->i_sb;
564 struct address_space *mapping = inode->i_mapping;
565 struct page *page = grab_cache_page(mapping, 0);
566 struct ufs_dir_entry * de;
/* The new directory's first chunk occupies [0, UFS_SECTOR_SIZE). */
573 err = mapping->a_ops->prepare_write(NULL, page, 0, UFS_SECTOR_SIZE);
580 base = (char*)page_address(page);
581 memset(base, 0, PAGE_CACHE_SIZE);
583 de = (struct ufs_dir_entry *) base;
/* "." points at the new directory itself. */
585 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
586 ufs_set_de_type(sb, de, inode->i_mode);
587 ufs_set_de_namlen(sb, de, 1);
588 de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
589 strcpy (de->d_name, ".");
590 de = (struct ufs_dir_entry *)
591 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
/* ".." points at the parent and spans the rest of the chunk. */
592 de->d_ino = cpu_to_fs32(sb, dir->i_ino);
593 ufs_set_de_type(sb, de, dir->i_mode);
594 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
595 ufs_set_de_namlen(sb, de, 2);
596 strcpy (de->d_name, "..");
598 err = ufs_commit_chunk(page, 0, UFS_SECTOR_SIZE);
601 page_cache_release(page);
/*
 * routine to check that the specified directory is empty (for rmdir):
 * walk every entry in every page; the directory is empty iff the only
 * live entries are "." (pointing at itself) and "..". The return
 * statements and the dot/dotdot branch bodies are elided in this view.
 */
608 int ufs_empty_dir(struct inode * inode)
610 struct super_block *sb = inode->i_sb;
611 struct page *page = NULL;
612 unsigned long i, npages = ufs_dir_pages(inode);
614 for (i = 0; i < npages; i++) {
616 struct ufs_dir_entry *de;
617 page = ufs_get_page(inode, i);
622 kaddr = page_address(page);
623 de = (struct ufs_dir_entry *)kaddr;
/* Last position in this page where a minimal entry could begin. */
624 kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);
626 while ((char *)de <= kaddr) {
/* d_reclen == 0 would loop forever; report corruption instead. */
627 if (de->d_reclen == 0) {
628 ufs_error(inode->i_sb, __FUNCTION__,
629 "zero-length directory entry: "
630 "kaddr=%p, de=%p\n", kaddr, de);
634 u16 namelen=ufs_get_de_namlen(sb, de);
635 /* check for . and .. */
/* Any live entry not starting with '.' means the dir is not empty. */
636 if (de->d_name[0] != '.')
642 fs32_to_cpu(sb, de->d_ino))
/* First char is '.'; anything but ".." here is a real entry. */
644 } else if (de->d_name[1] != '.')
647 de = ufs_next_entry(sb, de);
658 const struct file_operations ufs_dir_operations = {
659 .read = generic_read_dir,
660 .readdir = ufs_readdir,