diff --git a/README.md b/README.md index dcac9fd..e230077 100644 --- a/README.md +++ b/README.md @@ -109,7 +109,6 @@ cat $PWD/foo.img | docker run -i --rm $INT_IMAGE mdir -i /file.img /abc Future plans are to add the following: * embed boot code in `mbr` e.g. `altmbr.bin` (no need for `gpt` since an ESP with `/EFI/BOOT/BOOT.EFI` will boot) -* `ext4` filesystem writing (read-only already works) * `Joliet` extensions to `iso9660` * `Rock Ridge` sparse file support - supports the flag, but not yet reading or writing * `squashfs` sparse file support - currently treats sparse files as regular files diff --git a/filesystem/ext4/bitmaps.go b/filesystem/ext4/bitmaps.go deleted file mode 100644 index 1363ff2..0000000 --- a/filesystem/ext4/bitmaps.go +++ /dev/null @@ -1,104 +0,0 @@ -package ext4 - -import "fmt" - -// bitmap is a structure holding a bitmap -type bitmap struct { - bits []byte -} - -// bitmapFromBytes create a bitmap struct from bytes -func bitmapFromBytes(b []byte) *bitmap { - // just copy them over - bits := make([]byte, len(b)) - copy(bits, b) - bm := bitmap{ - bits: bits, - } - - return &bm -} - -// toBytes returns raw bytes ready to be written to disk -func (bm *bitmap) toBytes() []byte { - b := make([]byte, len(bm.bits)) - copy(b, bm.bits) - - return b -} - -func (bm *bitmap) checkFree(location int) (bool, error) { - byteNumber, bitNumber := findBitForIndex(location) - if byteNumber > len(bm.bits) { - return false, fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) - } - mask := byte(0x1) << bitNumber - return bm.bits[byteNumber]&mask == mask, nil -} - -func (bm *bitmap) free(location int) error { - byteNumber, bitNumber := findBitForIndex(location) - if byteNumber > len(bm.bits) { - return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) - } - mask := byte(0x1) << bitNumber - mask = ^mask - bm.bits[byteNumber] &= mask - return nil -} - -func (bm *bitmap) use(location int) error { - byteNumber, 
bitNumber := findBitForIndex(location) - if byteNumber > len(bm.bits) { - return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8) - } - mask := byte(0x1) << bitNumber - bm.bits[byteNumber] |= mask - return nil -} - -func (bm *bitmap) findFirstFree() int { - var location = -1 - for i, b := range bm.bits { - // if all used, continue to next - if b&0xff == 0xff { - continue - } - // not all used, so find first bit set to 0 - for j := uint8(0); j < 8; j++ { - mask := byte(0x1) << j - if b&mask != mask { - location = 8*i + (8 - int(j)) - break - } - } - break - } - return location -} - -//nolint:revive // params are unused as of yet, but will be used in the future -func (bm *bitmap) findFirstUsed() int { - var location int = -1 - for i, b := range bm.bits { - // if all free, continue to next - if b == 0x00 { - continue - } - // not all free, so find first bit set to 1 - for j := uint8(0); j < 8; j++ { - mask := byte(0x1) << j - mask = ^mask - if b|mask != mask { - location = 8*i + (8 - int(j)) - break - } - } - break - } - return location -} - -func findBitForIndex(index int) (byteNumber int, bitNumber uint8) { - return index / 8, uint8(index % 8) -} diff --git a/filesystem/ext4/blockgroup.go b/filesystem/ext4/blockgroup.go index 92deb6d..bf3b426 100644 --- a/filesystem/ext4/blockgroup.go +++ b/filesystem/ext4/blockgroup.go @@ -2,14 +2,16 @@ package ext4 import ( "fmt" + + "github.com/diskfs/go-diskfs/util" ) // blockGroup is a structure holding the data about a single block group // //nolint:unused // will be used in the future, not yet type blockGroup struct { - inodeBitmap *bitmap - blockBitmap *bitmap + inodeBitmap *util.Bitmap + blockBitmap *util.Bitmap blockSize int number int inodeTableSize int @@ -26,8 +28,8 @@ func blockGroupFromBytes(b []byte, blockSize, groupNumber int) (*blockGroup, err if actualSize != expectedSize { return nil, fmt.Errorf("expected to be passed %d bytes for 2 blocks of size %d, instead received %d", 
expectedSize, blockSize, actualSize) } - inodeBitmap := bitmapFromBytes(b[0:blockSize]) - blockBitmap := bitmapFromBytes(b[blockSize : 2*blockSize]) + inodeBitmap := util.BitmapFromBytes(b[0:blockSize]) + blockBitmap := util.BitmapFromBytes(b[blockSize : 2*blockSize]) bg := blockGroup{ inodeBitmap: inodeBitmap, @@ -43,8 +45,8 @@ func blockGroupFromBytes(b []byte, blockSize, groupNumber int) (*blockGroup, err //nolint:unused // will be used in the future, not yet func (bg *blockGroup) toBytes() ([]byte, error) { b := make([]byte, 2*bg.blockSize) - inodeBitmapBytes := bg.inodeBitmap.toBytes() - blockBitmapBytes := bg.blockBitmap.toBytes() + inodeBitmapBytes := bg.inodeBitmap.ToBytes() + blockBitmapBytes := bg.blockBitmap.ToBytes() b = append(b, inodeBitmapBytes...) b = append(b, blockBitmapBytes...) diff --git a/filesystem/ext4/checksum.go b/filesystem/ext4/checksum.go index ccd65d8..d7ffea4 100644 --- a/filesystem/ext4/checksum.go +++ b/filesystem/ext4/checksum.go @@ -29,6 +29,8 @@ func directoryChecksummer(seed, inodeNumber, inodeGeneration uint32) checksummer // directoryChecksumAppender returns a function that implements checksumAppender for a directory entries block // original calculations can be seen for e2fsprogs https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/ext2fs/csum.c#n301 // and in the linux tree https://github.com/torvalds/linux/blob/master/fs/ext4/namei.c#L376-L384 +// +//nolint:unparam // inodeGeneration is always 0 func directoryChecksumAppender(seed, inodeNumber, inodeGeneration uint32) checksumAppender { fn := directoryChecksummer(seed, inodeNumber, inodeGeneration) return func(b []byte) []byte { diff --git a/filesystem/ext4/consts.go b/filesystem/ext4/consts.go new file mode 100644 index 0000000..2295aa0 --- /dev/null +++ b/filesystem/ext4/consts.go @@ -0,0 +1,5 @@ +package ext4 + +const ( + maxUint16 uint64 = 1<<16 - 1 +) diff --git a/filesystem/ext4/directory.go b/filesystem/ext4/directory.go index 80b1957..24535f0 100644 --- 
a/filesystem/ext4/directory.go +++ b/filesystem/ext4/directory.go @@ -36,7 +36,7 @@ func (d *Directory) toBytes(bytesPerBlock uint32, checksumFunc checksumAppender) switch { case len(block)+len(b2) > int(bytesPerBlock)-minDirEntryLength: // if adding this one will go past the end of the block, pad out the previous - block = b[:len(block)-previousLength] + block = block[:len(block)-previousLength] previousB := previousEntry.toBytes(uint16(int(bytesPerBlock) - len(block) - minDirEntryLength)) block = append(block, previousB...) // add the checksum diff --git a/filesystem/ext4/ext4.go b/filesystem/ext4/ext4.go index fd26e80..b322dd7 100644 --- a/filesystem/ext4/ext4.go +++ b/filesystem/ext4/ext4.go @@ -4,9 +4,12 @@ import ( "encoding/binary" "errors" "fmt" + "io" + iofs "io/fs" "math" "os" "path" + "sort" "strings" "time" @@ -39,7 +42,7 @@ const ( maxClusterSize int = 65529 bytesPerSlot int = 32 maxCharsLongFilename int = 13 - maxBlocksPerExtent int = 32768 + maxBlocksPerExtent uint16 = 32768 million int = 1000000 billion int = 1000 * million firstNonReservedInode uint32 = 11 // traditional @@ -91,8 +94,6 @@ type FileSystem struct { bootSector []byte superblock *superblock groupDescriptors *groupDescriptors - dataBlockBitmap bitmap - inodeBitmap bitmap blockGroups int64 size int64 start int64 @@ -681,7 +682,7 @@ func (fs *FileSystem) Type() filesystem.Type { // * It will make the entire tree path if it does not exist // * It will not return an error if the path already exists func (fs *FileSystem) Mkdir(p string) error { - _, _, err := fs.readDirWithMkdir(p, true) + _, err := fs.readDirWithMkdir(p, true) // we are not interesting in returning the entries return err } @@ -692,20 +693,20 @@ func (fs *FileSystem) Mkdir(p string) error { // // Will return an error if the directory does not exist or is a regular file and not a directory func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { - _, entries, err := fs.readDirWithMkdir(p, false) + dir, err := 
fs.readDirWithMkdir(p, false) if err != nil { return nil, fmt.Errorf("error reading directory %s: %v", p, err) } // once we have made it here, looping is done. We have found the final entry // we need to return all of the file info - count := len(entries) + count := len(dir.entries) ret := make([]os.FileInfo, count) - for i, e := range entries { + for i, e := range dir.entries { in, err := fs.readInode(e.inode) if err != nil { return nil, fmt.Errorf("could not read inode %d at position %d in directory: %v", e.inode, i, err) } - ret[i] = FileInfo{ + ret[i] = &FileInfo{ modTime: in.modifyTime, name: e.filename, size: int64(in.size), @@ -723,47 +724,30 @@ func (fs *FileSystem) ReadDir(p string) ([]os.FileInfo, error) { // // returns an error if the file does not exist func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { - // get the path - dir := path.Dir(p) filename := path.Base(p) - // if the dir == filename, then it is just / - if dir == filename { - return nil, fmt.Errorf("cannot open directory %s as file", p) - } - // get the directory entries - parentDir, entries, err := fs.readDirWithMkdir(dir, false) + dir := path.Dir(p) + parentDir, entry, err := fs.getEntryAndParent(p) if err != nil { - return nil, fmt.Errorf("could not read directory entries for %s", dir) + return nil, err } - // we now know that the directory exists, see if the file exists - var targetEntry *directoryEntry - for _, e := range entries { - if e.filename != filename { - continue - } - // cannot do anything with directories - if e.fileType == dirFileTypeDirectory { - return nil, fmt.Errorf("cannot open directory %s as file", p) - } - // if we got this far, we have found the file - targetEntry = e - break + if entry != nil && entry.fileType == dirFileTypeDirectory { + return nil, fmt.Errorf("cannot open directory %s as file", p) } // see if the file exists // if the file does not exist, and is not opened for os.O_CREATE, return an error - if targetEntry == nil { + if 
entry == nil { if flag&os.O_CREATE == 0 { return nil, fmt.Errorf("target file %s does not exist and was not asked to create", p) } // else create it - targetEntry, err = fs.mkFile(parentDir, filename) + entry, err = fs.mkFile(parentDir, filename) if err != nil { return nil, fmt.Errorf("failed to create file %s: %v", p, err) } } // get the inode - inodeNumber := targetEntry.inode + inodeNumber := entry.inode inode, err := fs.readInode(inodeNumber) if err != nil { return nil, fmt.Errorf("could not read inode number %d: %v", inodeNumber, err) @@ -794,7 +778,7 @@ func (fs *FileSystem) OpenFile(p string, flag int) (filesystem.File, error) { return nil, fmt.Errorf("could not read extent tree for inode %d: %v", inodeNumber, err) } return &File{ - directoryEntry: targetEntry, + directoryEntry: entry, inode: inode, isReadWrite: flag&os.O_RDWR != 0, isAppend: flag&os.O_APPEND != 0, @@ -812,12 +796,223 @@ func (fs *FileSystem) Label() string { return fs.superblock.volumeLabel } +// Rm remove file or directory at path. +// If path is directory, it only will remove if it is empty. +// If path is a file, it will remove the file. +// Will not remove any parents. 
+// Error if the file does not exist or is not an empty directory +func (fs *FileSystem) Rm(p string) error { + parentDir, entry, err := fs.getEntryAndParent(p) + if err != nil { + return err + } + if parentDir.root && entry == &parentDir.directoryEntry { + return fmt.Errorf("cannot remove root directory") + } + if entry == nil { + return fmt.Errorf("file does not exist: %s", p) + } + // if it is a directory, it must be empty + if entry.fileType == dirFileTypeDirectory { + // read the directory + entries, err := fs.readDirectory(entry.inode) + if err != nil { + return fmt.Errorf("could not read directory %s: %v", p, err) + } + if len(entries) > 2 { + return fmt.Errorf("directory not empty: %s", p) + } + } + // at this point, it is either a file or an empty directory, so remove it + + // free up the blocks + // read the inode to find the blocks + removedInode, err := fs.readInode(entry.inode) + if err != nil { + return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, p, err) + } + extents, err := removedInode.extents.blocks(fs) + if err != nil { + return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, p, err) + } + // clear the inode from the inode bitmap + inodeBG := blockGroupForInode(int(entry.inode), fs.superblock.inodesPerGroup) + inodeBitmap, err := fs.readInodeBitmap(inodeBG) + if err != nil { + return fmt.Errorf("could not read inode bitmap: %v", err) + } + // clear up the blocks from the block bitmap. We are not clearing the block content, just the bitmap. 
+ // keep a cache of bitmaps, so we do not have to read them again and again + blockBitmaps := make(map[int]*util.Bitmap) + for _, e := range extents { + for i := e.startingBlock; i < e.startingBlock+uint64(e.count); i++ { + // determine what block group this block is in, and read the bitmap for that blockgroup + bg := blockGroupForBlock(int(i), fs.superblock.blocksPerGroup) + dataBlockBitmap, ok := blockBitmaps[bg] + if !ok { + dataBlockBitmap, err = fs.readBlockBitmap(bg) + if err != nil { + return fmt.Errorf("could not read block bitmap: %v", err) + } + blockBitmaps[bg] = dataBlockBitmap + } + // the extent lists the absolute block number, but the bitmap is relative to the block group + blockInBG := int(i) - int(fs.superblock.blocksPerGroup)*bg + if err := dataBlockBitmap.Clear(blockInBG); err != nil { + return fmt.Errorf("could not clear block bitmap for block %d: %v", i, err) + } + } + } + for bg, dataBlockBitmap := range blockBitmaps { + if err := fs.writeBlockBitmap(dataBlockBitmap, bg); err != nil { + return fmt.Errorf("could not write block bitmap back to disk: %v", err) + } + } + + // remove the directory entry from the parent + newEntries := make([]*directoryEntry, 0, len(parentDir.entries)-1) + for _, e := range parentDir.entries { + if e.inode == entry.inode { + continue + } + newEntries = append(newEntries, e) + } + parentDir.entries = newEntries + // write the parent directory back + dirBytes := parentDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, parentDir.inode, 0)) + parentInode, err := fs.readInode(parentDir.inode) + if err != nil { + return fmt.Errorf("could not read inode %d for %s: %v", entry.inode, path.Base(p), err) + } + extents, err = parentInode.extents.blocks(fs) + if err != nil { + return fmt.Errorf("could not read extents for inode %d for %s: %v", entry.inode, path.Base(p), err) + } + for _, e := range extents { + for i := 0; i < int(e.count); i++ { + b := dirBytes[i:fs.superblock.blockSize] 
+ if _, err := fs.file.WriteAt(b, (int64(i)+int64(e.startingBlock))*int64(fs.superblock.blockSize)); err != nil { + return fmt.Errorf("could not write inode bitmap back to disk: %v", err) + } + } + } + + // remove the inode from the bitmap and write the inode bitmap back + // inode is absolute, but bitmap is relative to block group + inodeInBG := int(entry.inode) - int(fs.superblock.inodesPerGroup)*inodeBG + if err := inodeBitmap.Clear(inodeInBG); err != nil { + return fmt.Errorf("could not clear inode bitmap for inode %d: %v", entry.inode, err) + } + + // write the inode bitmap back + if err := fs.writeInodeBitmap(inodeBitmap, inodeBG); err != nil { + return fmt.Errorf("could not write inode bitmap back to disk: %v", err) + } + // update the group descriptor + gd := fs.groupDescriptors.descriptors[inodeBG] + + // update the group descriptor inodes and blocks + gd.freeInodes++ + gd.freeBlocks += uint32(removedInode.blocks) + // write the group descriptor back + gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID()) + gdtBlock := 1 + if fs.superblock.blockSize == 1024 { + gdtBlock = 2 + } + if _, err := fs.file.WriteAt(gdBytes, fs.start+int64(gdtBlock)*int64(fs.superblock.blockSize)+int64(gd.number)*int64(fs.superblock.groupDescriptorSize)); err != nil { + return fmt.Errorf("could not write Group Descriptor bytes to file: %v", err) + } + + // we could remove the inode from the inode table in the group descriptor, + // but we do not need to do so. Since we are not reusing the inode, we can just leave it there, + // the bitmap always is checked before reusing an inode location. 
+ fs.superblock.freeInodes++ + fs.superblock.freeBlocks += removedInode.blocks + return fs.writeSuperblock() +} + +func (fs *FileSystem) Truncate(p string, size int64) error { + _, entry, err := fs.getEntryAndParent(p) + if err != nil { + return err + } + if entry == nil { + return fmt.Errorf("file does not exist: %s", p) + } + if entry.fileType == dirFileTypeDirectory { + return fmt.Errorf("cannot truncate directory %s", p) + } + // it is not a directory, and it exists, so truncate it + inode, err := fs.readInode(entry.inode) + if err != nil { + return fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err) + } + // change the file size + inode.size = uint64(size) + + // free used blocks if shrank, or reserve new blocks if grew + // both of which mean updating the superblock, and the extents tree in the inode + + // write the inode back + return fs.writeInode(inode) +} + +// getEntryAndParent given a path, get the Directory for the parent and the directory entry for the file. +// If the directory does not exist, returns an error. +// If the file does not exist, does not return an error, but rather returns a nil entry. +func (fs *FileSystem) getEntryAndParent(p string) (parent *Directory, entry *directoryEntry, err error) { + dir := path.Dir(p) + filename := path.Base(p) + // get the directory entries + parentDir, err := fs.readDirWithMkdir(dir, false) + if err != nil { + return nil, nil, fmt.Errorf("could not read directory entries for %s", dir) + } + // we now know that the directory exists, see if the file exists + var targetEntry *directoryEntry + if parentDir.root && filename == "/" { + // root directory + return parentDir, &parentDir.directoryEntry, nil + } + + for _, e := range parentDir.entries { + if e.filename != filename { + continue + } + // if we got this far, we have found the file + targetEntry = e + break + } + return parentDir, targetEntry, nil +} + +// Stat return fs.FileInfo about a specific file path. 
+func (fs *FileSystem) Stat(p string) (iofs.FileInfo, error) { + _, entry, err := fs.getEntryAndParent(p) + if err != nil { + return nil, err + } + if entry == nil { + return nil, fmt.Errorf("file does not exist: %s", p) + } + in, err := fs.readInode(entry.inode) + if err != nil { + return nil, fmt.Errorf("could not read inode %d in directory: %v", entry.inode, err) + } + return &FileInfo{ + modTime: in.modifyTime, + name: entry.filename, + size: int64(in.size), + isDir: entry.fileType == dirFileTypeDirectory, + }, nil +} + // SetLabel changes the label on the writable filesystem. Different file system may hav different // length constraints. -// -//nolint:revive // will use params when read-write func (fs *FileSystem) SetLabel(label string) error { - return errors.New("cannot set label, filesystem currently read-only") + fs.superblock.volumeLabel = label + return fs.writeSuperblock() } // readInode read a single inode from disk @@ -858,7 +1053,7 @@ func (fs *FileSystem) readInode(inodeNumber uint32) (*inode, error) { if err != nil { return nil, fmt.Errorf("could not read extent tree for symlink inode %d: %v", inodeNumber, err) } - b, err := fs.readFileBytes(extents) + b, err := fs.readFileBytes(extents, inode.size) if err != nil { return nil, fmt.Errorf("could not read symlink target for inode %d: %v", inodeNumber, err) } @@ -881,10 +1076,13 @@ func (fs *FileSystem) writeInode(i *inode) error { // byteStart := inodeTableBlock * sb.blockSize // offsetInode is how many inodes in our inode is offsetInode := (i.number - 1) % inodesPerGroup + byteStart := inodeTableBlock * uint64(sb.blockSize) + // offsetInode is how many inodes in our inode is + // offset is how many bytes in our inode is // offset is how many bytes in our inode is offset := int64(offsetInode) * int64(inodeSize) inodeBytes := i.toBytes(sb) - wrote, err := fs.file.WriteAt(inodeBytes, offset) + wrote, err := fs.file.WriteAt(inodeBytes, int64(byteStart)+offset) if err != nil { return fmt.Errorf("failed 
to write inode %d at offset %d of block %d from block group %d: %v", i.number, offset, inodeTableBlock, bg, err) } @@ -907,7 +1105,7 @@ func (fs *FileSystem) readDirectory(inodeNumber uint32) ([]*directoryEntry, erro return nil, fmt.Errorf("unable to get blocks for inode %d: %w", in.number, err) } // read the contents of the file across all blocks - b, err := fs.readFileBytes(extents) + b, err := fs.readFileBytes(extents, in.size) if err != nil { return nil, fmt.Errorf("error reading file bytes for inode %d: %v", inodeNumber, err) } @@ -936,12 +1134,15 @@ func (fs *FileSystem) readDirectory(inodeNumber uint32) ([]*directoryEntry, erro // readFileBytes read all of the bytes for an individual file pointed at by a given inode // normally not very useful, but helpful when reading an entire directory. -func (fs *FileSystem) readFileBytes(extents extents) ([]byte, error) { +func (fs *FileSystem) readFileBytes(extents extents, filesize uint64) ([]byte, error) { // walk through each one, gobbling up the bytes b := make([]byte, 0, fs.superblock.blockSize) for i, e := range extents { start := e.startingBlock * uint64(fs.superblock.blockSize) count := uint64(e.count) * uint64(fs.superblock.blockSize) + if uint64(len(b))+count > filesize { + count = filesize - uint64(len(b)) + } b2 := make([]byte, count) read, err := fs.file.ReadAt(b2, int64(start)) if err != nil { @@ -951,25 +1152,23 @@ func (fs *FileSystem) readFileBytes(extents extents) ([]byte, error) { return nil, fmt.Errorf("read %d bytes instead of %d for extent %d", read, count, i) } b = append(b, b2...) 
+ if uint64(len(b)) >= filesize { + break + } } return b, nil } -//nolint:revive // params are unused because this still is read-only, but it will be read-write at some point -func (fs *FileSystem) writeDirectoryEntries(dir *Directory) error { - return errors.New("unsupported write directory entries, currently read-only") -} - -// make a file -// -//nolint:revive // params are unused because this still is read-only, but it will be read-write at some point +// mkFile make a file with a given name in the given directory. func (fs *FileSystem) mkFile(parent *Directory, name string) (*directoryEntry, error) { - return nil, errors.New("unsupported to create a file, currently read-only") + return fs.mkDirEntry(parent, name, false) } -// readDirWithMkdir - walks down a directory tree to the last entry -// if it does not exist, it may or may not make it -func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*directoryEntry, error) { +// readDirWithMkdir - walks down a directory tree to the last entry in p. +// For example, if p is /a/b/c, it will walk down to c. +// Expects c to be a directory. +// If each step in the tree does not exist, it will either make it if doMake is true, or return an error. +func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, error) { paths := splitPath(p) // walk down the directory tree until all paths have been walked or we cannot find something @@ -981,11 +1180,13 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di filename: "", fileType: dirFileTypeDirectory, }, + root: true, } entries, err := fs.readDirectory(rootInode) if err != nil { - return nil, nil, fmt.Errorf("failed to read directory %s", "/") + return nil, fmt.Errorf("failed to read directory %s", "/") } + currentDir.entries = entries for i, subp := range paths { // do we have an entry whose name is the same as this name? 
found := false @@ -994,7 +1195,7 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di continue } if e.fileType != dirFileTypeDirectory { - return nil, nil, fmt.Errorf("cannot create directory at %s since it is a file", "/"+strings.Join(paths[0:i+1], "/")) + return nil, fmt.Errorf("cannot create directory at %s since it is a file", "/"+strings.Join(paths[0:i+1], "/")) } // the filename matches, and it is a subdirectory, so we can break after saving the directory entry, which contains the inode found = true @@ -1011,29 +1212,26 @@ func (fs *FileSystem) readDirWithMkdir(p string, doMake bool) (*Directory, []*di var subdirEntry *directoryEntry subdirEntry, err = fs.mkSubdir(currentDir, subp) if err != nil { - return nil, nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/")) - } - // write the directory entries to disk - err = fs.writeDirectoryEntries(currentDir) - if err != nil { - return nil, nil, fmt.Errorf("error writing directory entries to disk: %v", err) + return nil, fmt.Errorf("failed to create subdirectory %s", "/"+strings.Join(paths[0:i+1], "/")) } // save where we are to search next currentDir = &Directory{ directoryEntry: *subdirEntry, } } else { - return nil, nil, fmt.Errorf("path %s not found", "/"+strings.Join(paths[0:i+1], "/")) + return nil, fmt.Errorf("path %s not found", "/"+strings.Join(paths[0:i+1], "/")) } } // get all of the entries in this directory entries, err = fs.readDirectory(currentDir.inode) if err != nil { - return nil, nil, fmt.Errorf("failed to read directory %s", "/"+strings.Join(paths[0:i+1], "/")) + return nil, fmt.Errorf("failed to read directory %s", "/"+strings.Join(paths[0:i+1], "/")) } + currentDir.entries = entries } // once we have made it here, looping is done; we have found the final entry - return currentDir, entries, nil + currentDir.entries = entries + return currentDir, nil } // readBlock read a single block from disk @@ -1086,8 +1284,465 @@ func 
recalculateBlocksize(numblocks, size int64) (sectorsPerBlock int, blocksize } // mkSubdir make a subdirectory of a given name inside the parent -// -//nolint:revive // params are unused because this still is read-only, but it will be read-write at some point +// 1- allocate a single data block for the directory +// 2- create an inode in the inode table pointing to that data block +// 3- mark the inode in the inode bitmap +// 4- mark the data block in the data block bitmap +// 5- create a directory entry in the parent directory data blocks func (fs *FileSystem) mkSubdir(parent *Directory, name string) (*directoryEntry, error) { - return nil, errors.New("mksubdir not yet supported") + return fs.mkDirEntry(parent, name, true) +} + +func (fs *FileSystem) mkDirEntry(parent *Directory, name string, isDir bool) (*directoryEntry, error) { + // still to do: + // - write directory entry in parent + // - write inode to disk + + // create an inode + inodeNumber, err := fs.allocateInode(parent.inode) + if err != nil { + return nil, fmt.Errorf("could not allocate inode for file %s: %w", name, err) + } + // get extents for the file - prefer in the same block group as the inode, if possible + newExtents, err := fs.allocateExtents(1, nil) + if err != nil { + return nil, fmt.Errorf("could not allocate disk space for file %s: %w", name, err) + } + extentTreeParsed, err := extendExtentTree(nil, newExtents, fs, nil) + if err != nil { + return nil, fmt.Errorf("could not convert extents into tree: %w", err) + } + // normally, after getting a tree from extents, you would need to then allocate all of the blocks + // in the extent tree - leafs and intermediate. 
However, because we are allocating a new directory + // with a single extent, we *know* it can fit in the inode itself (which has a max of 4), so no need + + // create a directory entry for the file + deFileType := dirFileTypeRegular + fileType := fileTypeRegularFile + var contentSize uint64 + if isDir { + deFileType = dirFileTypeDirectory + fileType = fileTypeDirectory + contentSize = uint64(fs.superblock.blockSize) + } + de := directoryEntry{ + inode: inodeNumber, + filename: name, + fileType: deFileType, + } + parent.entries = append(parent.entries, &de) + // write the parent out to disk + bytesPerBlock := fs.superblock.blockSize + parentDirBytes := parent.toBytes(bytesPerBlock, directoryChecksumAppender(fs.superblock.checksumSeed, parent.inode, 0)) + // check if parent has increased in size beyond allocated blocks + parentInode, err := fs.readInode(parent.inode) + if err != nil { + return nil, fmt.Errorf("could not read inode %d of parent directory: %w", parent.inode, err) + } + + // write the directory entry in the parent + // figure out which block it goes into, and possibly rebalance the directory entries hash tree + parentExtents, err := parentInode.extents.blocks(fs) + if err != nil { + return nil, fmt.Errorf("could not read parent extents for directory: %w", err) + } + dirFile := &File{ + inode: parentInode, + directoryEntry: &directoryEntry{ + inode: parent.inode, + filename: name, + fileType: dirFileTypeDirectory, + }, + filesystem: fs, + isReadWrite: true, + isAppend: true, + offset: 0, + extents: parentExtents, + } + wrote, err := dirFile.Write(parentDirBytes) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("unable to write new directory: %w", err) + } + if wrote != len(parentDirBytes) { + return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new directory", wrote, len(parentDirBytes)) + } + + // write the inode for the new entry out + now := time.Now() + in := inode{ + number: inodeNumber, + permissionsGroup: 
parentInode.permissionsGroup, + permissionsOwner: parentInode.permissionsOwner, + permissionsOther: parentInode.permissionsOther, + fileType: fileType, + owner: parentInode.owner, + group: parentInode.group, + size: contentSize, + hardLinks: 2, + blocks: newExtents.blockCount(), + flags: &inodeFlags{}, + nfsFileVersion: 0, + version: 0, + inodeSize: parentInode.inodeSize, + deletionTime: 0, + accessTime: now, + changeTime: now, + createTime: now, + modifyTime: now, + extendedAttributeBlock: 0, + project: 0, + extents: extentTreeParsed, + } + // write the inode to disk + if err := fs.writeInode(&in); err != nil { + return nil, fmt.Errorf("could not write inode for new directory: %w", err) + } + // if a directory, put entries for . and .. in the first block for the new directory + if isDir { + initialEntries := []*directoryEntry{ + { + inode: inodeNumber, + filename: ".", + fileType: dirFileTypeDirectory, + }, + { + inode: parent.inode, + filename: "..", + fileType: dirFileTypeDirectory, + }, + } + newDir := Directory{ + directoryEntry: de, + root: false, + entries: initialEntries, + } + dirBytes := newDir.toBytes(fs.superblock.blockSize, directoryChecksumAppender(fs.superblock.checksumSeed, inodeNumber, 0)) + // write the bytes out to disk + dirFile = &File{ + inode: &in, + directoryEntry: &directoryEntry{ + inode: inodeNumber, + filename: name, + fileType: dirFileTypeDirectory, + }, + filesystem: fs, + isReadWrite: true, + isAppend: true, + offset: 0, + extents: *newExtents, + } + wrote, err := dirFile.Write(dirBytes) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("unable to write new directory: %w", err) + } + if wrote != len(dirBytes) { + return nil, fmt.Errorf("wrote only %d bytes instead of expected %d for new entry", wrote, len(dirBytes)) + } + } + + // return + return &de, nil +} + +// allocateInode allocate a single inode +// passed the parent, so it can know where to allocate it +// logic: +// - parent is 0 : root inode, will allocate at 2 +// 
- parent is 2 : child of root, will try to spread out +// - else : try to collocate with parent, if possible +func (fs *FileSystem) allocateInode(parent uint32) (uint32, error) { + var ( + inodeNumber = -1 + ) + if parent == 0 { + inodeNumber = 2 + } + // load the inode bitmap + var ( + bg int + gd groupDescriptor + ) + + for _, gd = range fs.groupDescriptors.descriptors { + if inodeNumber != -1 { + break + } + bg := int(gd.number) + bm, err := fs.readInodeBitmap(bg) + if err != nil { + return 0, fmt.Errorf("could not read inode bitmap: %w", err) + } + // get first free inode + inodeNumber = bm.FirstFree(0) + // if we found a + if inodeNumber == -1 { + continue + } + // set it as marked + if err := bm.Set(inodeNumber); err != nil { + return 0, fmt.Errorf("could not set inode bitmap: %w", err) + } + // write the inode bitmap bytes + if err := fs.writeInodeBitmap(bm, bg); err != nil { + return 0, fmt.Errorf("could not write inode bitmap: %w", err) + } + } + if inodeNumber == -1 { + return 0, errors.New("no free inodes available") + } + + // reduce number of free inodes in that descriptor in the group descriptor table + gd.freeInodes-- + + // get the group descriptor as bytes + gdBytes := gd.toBytes(fs.superblock.gdtChecksumType(), fs.superblock.uuid.ID()) + + // write the group descriptor bytes + // gdt starts in block 1 of any redundant copies, specifically in BG 0 + gdtBlock := 1 + blockByteLocation := gdtBlock * int(fs.superblock.blockSize) + gdOffset := fs.start + int64(blockByteLocation) + int64(bg)*int64(fs.superblock.groupDescriptorSize) + wrote, err := fs.file.WriteAt(gdBytes, gdOffset) + if err != nil { + return 0, fmt.Errorf("unable to write group descriptor bytes for blockgroup %d: %v", bg, err) + } + if wrote != len(gdBytes) { + return 0, fmt.Errorf("wrote only %d bytes instead of expected %d for group descriptor of block group %d", wrote, len(gdBytes), bg) + } + + return uint32(inodeNumber), nil +} + +// allocateExtents allocate the data blocks in 
extents that are +// to be used for a file of a given size +// arguments are file size in bytes and existing extents +// if previous is nil, then we are not (re)sizing an existing file but creating a new one +// returns the extents to be used in order +func (fs *FileSystem) allocateExtents(size uint64, previous *extents) (*extents, error) { + // 1- calculate how many blocks are needed + required := size / uint64(fs.superblock.blockSize) + remainder := size % uint64(fs.superblock.blockSize) + if remainder > 0 { + required++ + } + // 2- see how many blocks already are allocated + var allocated uint64 + if previous != nil { + allocated = previous.blockCount() + } + // 3- if needed, allocate new blocks in extents + extraBlockCount := required - allocated + // if we have enough, do not add anything + if extraBlockCount <= 0 { + return previous, nil + } + + // if there are not enough blocks left on the filesystem, return an error + if fs.superblock.freeBlocks < extraBlockCount { + return nil, fmt.Errorf("only %d blocks free, requires additional %d", fs.superblock.freeBlocks, extraBlockCount) + } + + // now we need to look for as many contiguous blocks as possible + // first calculate the minimum number of extents needed + + // if all of the extents, except possibly the last, are maximum size, then we need minExtents extents + // we loop through, trying to allocate an extent as large as our remaining blocks or maxBlocksPerExtent, + // whichever is smaller + blockGroupCount := fs.blockGroups + // TODO: instead of starting with BG 0, should start with BG where the inode for this file/dir is located + var ( + newExtents []extent + datablockBitmaps = map[int]*util.Bitmap{} + blocksPerGroup = fs.superblock.blocksPerGroup + ) + + var i int64 + for i = 0; i < blockGroupCount && allocated < extraBlockCount; i++ { + // keep track if we allocated anything in this blockgroup + // 1- read the GDT for this blockgroup to find the location of the block bitmap + // and total free blocks 
+ // 2- read the block bitmap from disk + // 3- find the maximum contiguous space available + bs, err := fs.readBlockBitmap(int(i)) + if err != nil { + return nil, fmt.Errorf("could not read block bitmap for block group %d: %v", i, err) + } + // now find our unused blocks and how many there are in a row as potential extents + if extraBlockCount > maxUint16 { + return nil, fmt.Errorf("cannot allocate more than %d blocks in a single extent", maxUint16) + } + // get the list of free blocks + blockList := bs.FreeList() + + // create possible extents by size + // Step 3: Group contiguous blocks into extents + var extents []extent + for _, freeBlock := range blockList { + start, length := freeBlock.Position, freeBlock.Count + for length > 0 { + extentLength := min(length, int(maxBlocksPerExtent)) + extents = append(extents, extent{startingBlock: uint64(start) + uint64(i)*uint64(blocksPerGroup), count: uint16(extentLength)}) + start += extentLength + length -= extentLength + } + } + + // sort in descending order + sort.Slice(extents, func(i, j int) bool { + return extents[i].count > extents[j].count + }) + + var allocatedBlocks uint64 + for _, ext := range extents { + if extraBlockCount <= 0 { + break + } + extentToAdd := ext + if uint64(ext.count) >= extraBlockCount { + extentToAdd = extent{startingBlock: ext.startingBlock, count: uint16(extraBlockCount)} + } + newExtents = append(newExtents, extentToAdd) + allocatedBlocks += uint64(extentToAdd.count) + extraBlockCount -= uint64(extentToAdd.count) + // set the marked blocks in the bitmap, and save the bitmap + for block := extentToAdd.startingBlock; block < extentToAdd.startingBlock+uint64(extentToAdd.count); block++ { + // determine what block group this block is in, and read the bitmap for that blockgroup + // the extent lists the absolute block number, but the bitmap is relative to the block group + blockInGroup := block - uint64(i)*uint64(blocksPerGroup) + if err := bs.Set(int(blockInGroup)); err != nil { + return 
nil, fmt.Errorf("could not clear block bitmap for block %d: %v", i, err) + } + } + + // do *not* write the bitmap back yet, as we do not yet know if we will be able to fulfill the entire request. + // instead save it for later + datablockBitmaps[int(i)] = bs + } + } + if extraBlockCount > 0 { + return nil, fmt.Errorf("could not allocate %d blocks", extraBlockCount) + } + + // write the block bitmaps back to disk + for bg, bs := range datablockBitmaps { + if err := fs.writeBlockBitmap(bs, bg); err != nil { + return nil, fmt.Errorf("could not write block bitmap for block group %d: %v", bg, err) + } + } + + // need to update the total blocks used/free in superblock + fs.superblock.freeBlocks -= allocated + // update the blockBitmapChecksum for any updated block groups in GDT + // write updated superblock and GDT to disk + if err := fs.writeSuperblock(); err != nil { + return nil, fmt.Errorf("could not write superblock: %w", err) + } + // write backup copies + var exten extents = newExtents + return &exten, nil +} + +// readInodeBitmap read the inode bitmap off the disk. +// This would be more efficient if we just read one group descriptor's bitmap +// but for now we are about functionality, not efficiency, so it will read the whole thing. 
+func (fs *FileSystem) readInodeBitmap(group int) (*util.Bitmap, error) { + if group >= len(fs.groupDescriptors.descriptors) { + return nil, fmt.Errorf("block group %d does not exist", group) + } + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.inodeBitmapLocation + bitmapByteCount := fs.superblock.inodesPerGroup / 8 + b := make([]byte, bitmapByteCount) + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + read, err := fs.file.ReadAt(b, offset) + if err != nil { + return nil, fmt.Errorf("unable to read inode bitmap for blockgroup %d: %w", gd.number, err) + } + if read != int(bitmapByteCount) { + return nil, fmt.Errorf("Read %d bytes instead of expected %d for inode bitmap of block group %d", read, bitmapByteCount, gd.number) + } + // only take bytes corresponding to the number of inodes per group + + // create a bitmap + bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors)) + bs.FromBytes(b) + return bs, nil +} + +// writeInodeBitmap write the inode bitmap to the disk. 
+func (fs *FileSystem) writeInodeBitmap(bm *util.Bitmap, group int) error { + if group >= len(fs.groupDescriptors.descriptors) { + return fmt.Errorf("block group %d does not exist", group) + } + b := bm.ToBytes() + gd := fs.groupDescriptors.descriptors[group] + bitmapByteCount := fs.superblock.inodesPerGroup / 8 + bitmapLocation := gd.inodeBitmapLocation + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + wrote, err := fs.file.WriteAt(b, offset) + if err != nil { + return fmt.Errorf("unable to write inode bitmap for blockgroup %d: %w", gd.number, err) + } + if wrote != int(bitmapByteCount) { + return fmt.Errorf("wrote %d bytes instead of expected %d for inode bitmap of block group %d", wrote, bitmapByteCount, gd.number) + } + + return nil +} + +func (fs *FileSystem) readBlockBitmap(group int) (*util.Bitmap, error) { + if group >= len(fs.groupDescriptors.descriptors) { + return nil, fmt.Errorf("block group %d does not exist", group) + } + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.blockBitmapLocation + b := make([]byte, fs.superblock.blockSize) + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + read, err := fs.file.ReadAt(b, offset) + if err != nil { + return nil, fmt.Errorf("unable to read block bitmap for blockgroup %d: %w", gd.number, err) + } + if read != int(fs.superblock.blockSize) { + return nil, fmt.Errorf("Read %d bytes instead of expected %d for block bitmap of block group %d", read, fs.superblock.blockSize, gd.number) + } + // create a bitmap + bs := util.NewBitmap(int(fs.superblock.blockSize) * len(fs.groupDescriptors.descriptors)) + bs.FromBytes(b) + return bs, nil +} + +// writeBlockBitmap write the inode bitmap to the disk. 
+func (fs *FileSystem) writeBlockBitmap(bm *util.Bitmap, group int) error { + if group >= len(fs.groupDescriptors.descriptors) { + return fmt.Errorf("block group %d does not exist", group) + } + b := bm.ToBytes() + gd := fs.groupDescriptors.descriptors[group] + bitmapLocation := gd.blockBitmapLocation + offset := int64(bitmapLocation*uint64(fs.superblock.blockSize) + uint64(fs.start)) + wrote, err := fs.file.WriteAt(b, offset) + if err != nil { + return fmt.Errorf("unable to write block bitmap for blockgroup %d: %w", gd.number, err) + } + if wrote != int(fs.superblock.blockSize) { + return fmt.Errorf("wrote %d bytes instead of expected %d for block bitmap of block group %d", wrote, fs.superblock.blockSize, gd.number) + } + + return nil +} + +func (fs *FileSystem) writeSuperblock() error { + superblockBytes, err := fs.superblock.toBytes() + if err != nil { + return fmt.Errorf("could not convert superblock to bytes: %v", err) + } + _, err = fs.file.WriteAt(superblockBytes, fs.start+int64(BootSectorSize)) + return err +} + +func blockGroupForInode(inodeNumber int, inodesPerGroup uint32) int { + return (inodeNumber - 1) / int(inodesPerGroup) +} +func blockGroupForBlock(blockNumber int, blocksPerGroup uint32) int { + return (blockNumber - 1) / int(blocksPerGroup) } diff --git a/filesystem/ext4/ext4.md b/filesystem/ext4/ext4.md index 05f6229..0cfa719 100644 --- a/filesystem/ext4/ext4.md +++ b/filesystem/ext4/ext4.md @@ -1,7 +1,7 @@ # ext4 This file describes the layout on disk of ext4. It is a living document and probably will be deleted rather than committed to git. -The primary reference document is [here](https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout#Overview). +The primary reference document is [here](https://ext4.wiki.kernel.org/index.php/Ext4_Disk_Layout). Also useful are: @@ -276,8 +276,20 @@ When removing an entry, you only ever need to rebalance the node from which you 1. Move the median up to the parent node 1. 
If necessary, rebalance the parent node -### Convert Classical Linear to Hash Tree +### Convert Classical Linear Directory Entries to Hash Tree +The conversion usually happens when a single entry will exceed the capacity of a single block. + +1. Switch the flag in the inode to hash-tree +1. Calculate the hash of each entry +1. Create 2 new blocks: + * 1 for the bottom half of the entries + * 1 for the top half of the entries +1. Move the bottom half of the entries into the bottom block +1. Move the top half of the entries into the top block +1. Zero out the current single file block, which previously had the classic linear directory entries +1. Write the header into the tree block, with the 0-hash-value pointing to the bottom block +1. Write one entry after the header, for the lowest hash value of the upper block, pointing to the upper block ### Read File Contents @@ -287,4 +299,37 @@ When removing an entry, you only ever need to rebalance the node from which you ### Create File +1. Walk the tree until you find the inode for the parent directory. +1. Find a free inode using the inode bitmap. +1. Find a free block using the block bitmap. +1. Create the inode for the new file in the inode table. Be sure to update all the dependencies: + * inode bitmap + * inode table + * inode count in the block group table + * inode count in the superblock +1. Reserve a data block for the new file in the block group table. Be sure to update all the dependencies: + * block bitmap + * block count in the block group table + * block count in the superblock +1. Create the file entry in the parent directory. Depends on if this is classic linear directory or hash tree directory. Note that if it is classic linear, calculate the new size before writing the entry. If it is bigger than a single block, convert to hash tree. TODO: is this the right boundary, single block? + * Classic linear directory: + 1. Find the last block in the parent directory "file" + 1. 
Add a classical linear directory entry at the end of it + 1. Update the inode for the parent directory with the new file size + * Hash tree directory: + 1. Calculate the hash of the new directory entry name + 1. Determine which block in the parent directory "file" the new entry should live, based on the hash table + 1. Find the block + 1. Add a classical linear entry at the end of it + 1. Update the inode for the parent directory with the new file size + + ### Write File Contents + +1. Walk the tree until you find the inode for the file you want. +1. Find the data blocks for that inode, see [inode to data blocks](#inode-to-data-blocks). +1. Write the data to the data blocks. +1. If the data written exceeds the end of the last block, reserve a new block, update the inode extent tree, and write the data to the new block. +1. Update the inode with the filesize +1. Update the block group table with the used blocks +1. Update the superblock with the used blocks diff --git a/filesystem/ext4/ext4_test.go b/filesystem/ext4/ext4_test.go index 9f75245..b9d72c7 100644 --- a/filesystem/ext4/ext4_test.go +++ b/filesystem/ext4/ext4_test.go @@ -7,6 +7,8 @@ import ( "fmt" "io" "os" + "path" + "path/filepath" "slices" "strings" "testing" @@ -146,3 +148,271 @@ func TestReadFile(t *testing.T) { }) } } + +// copy infile to outfile +func testCopyFile(infile, outfile string) error { + in, err := os.Open(infile) + if err != nil { + return fmt.Errorf("Error opening input file: %w", err) + } + defer in.Close() + out, err := os.Create(outfile) + if err != nil { + return fmt.Errorf("Error opening output file: %w", err) + } + defer out.Close() + if _, err := io.Copy(out, in); err != nil { + return fmt.Errorf("Error copying file contents: %w", err) + } + return nil +} + +// creates a copy of the ready-to-run ext4 img file, so we can manipulate it as desired +// without affecting the original +func testCreateImgCopy(t *testing.T) string { + t.Helper() + dir := t.TempDir() + outfile := 
filepath.Join(dir, path.Base(imgFile)) + if err := testCopyFile(imgFile, outfile); err != nil { + t.Fatalf("Error copying image file: %v", err) + } + return outfile +} + +func TestWriteFile(t *testing.T) { + var newFile = "newlygeneratedfile.dat" + tests := []struct { + name string + path string + flag int + offset int64 + size int + readAll bool + expected []byte + err error + }{ + {"create invalid path", "/do/not/exist/any/where", os.O_CREATE, 0, 0, false, nil, errors.New("could not read directory entries")}, + {"create in root", "/" + newFile, os.O_CREATE | os.O_RDWR, 0, 0, false, []byte("hello world"), nil}, + {"create in valid subdirectory", "/foo/" + newFile, os.O_CREATE | os.O_RDWR, 0, 0, false, []byte("hello world"), nil}, + {"create exists as directory", "/foo", os.O_CREATE, 0, 0, false, nil, errors.New("cannot open directory /foo as file")}, + {"create exists as file", "/random.dat", os.O_CREATE | os.O_RDWR, 0, 0, false, nil, nil}, + {"append invalid path", "/do/not/exist/any/where", os.O_APPEND, 0, 0, false, nil, errors.New("could not read directory entries")}, + {"append exists as directory", "/foo", os.O_APPEND, 0, 0, false, nil, errors.New("cannot open directory /foo as file")}, + {"append exists as file", "/random.dat", os.O_APPEND | os.O_RDWR, 0, 0, false, nil, nil}, + {"overwrite invalid path", "/do/not/exist/any/where", os.O_RDWR, 0, 0, false, nil, errors.New("could not read directory entries")}, + {"overwrite exists as directory", "/foo", os.O_RDWR, 0, 0, false, nil, errors.New("cannot open directory /foo as file")}, + {"overwrite exists as file", "/random.dat", os.O_RDWR, 0, 0, false, nil, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outfile := testCreateImgCopy(t) + f, err := os.OpenFile(outfile, os.O_RDWR, 0) + if err != nil { + t.Fatalf("Error opening test image: %v", err) + } + defer f.Close() + fs, err := Read(f, 100*MB, 0, 512) + if err != nil { + t.Fatalf("Error reading filesystem: %v", err) + } + file, 
err := fs.OpenFile(tt.path, tt.flag) + switch { + case err != nil && tt.err == nil: + t.Fatalf("unexpected error opening file: %v", err) + case err == nil && tt.err != nil: + t.Fatalf("missing expected error opening file: %v", tt.err) + case err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error()): + t.Fatalf("mismatched error opening file, expected '%v' got '%v'", tt.err, err) + case err == nil: + if _, err := file.Seek(tt.offset, io.SeekStart); err != nil { + t.Fatalf("Error seeking file for write: %v", err) + } + n, err := file.Write(tt.expected) + if err != nil && err != io.EOF { + t.Fatalf("Error writing file: %v", err) + } + if n != len(tt.expected) { + t.Fatalf("short write, expected %d bytes got %d", len(tt.expected), n) + } + // now read from the file and see that it matches what we wrote + if _, err := file.Seek(tt.offset, io.SeekStart); err != nil { + t.Fatalf("Error seeking file for read: %v", err) + } + b := make([]byte, len(tt.expected)) + n, err = file.Read(b) + if err != nil && err != io.EOF { + t.Fatalf("Error reading file: %v", err) + } + if n != len(tt.expected) { + t.Fatalf("short read, expected %d bytes got %d", len(tt.expected), n) + } + if !bytes.Equal(b, tt.expected) { + t.Errorf("file data mismatch") + } + } + }) + } +} + +func TestRm(t *testing.T) { + tests := []struct { + name string + path string + err error + }{ + {"invalid path", "/do/not/exist/any/where", errors.New("could not read directory entries")}, + {"root dir", "/", errors.New("cannot remove root directory")}, + {"root file", "/random.dat", nil}, + {"subdir file", "/foo/subdirfile.txt", nil}, + {"nonexistent file", "/foo/nonexistent.dat", errors.New("file does not exist")}, + {"non-empty dir", "/foo", errors.New("directory not empty")}, + {"empty dir", "/foo/dir1", nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outfile := testCreateImgCopy(t) + f, err := os.OpenFile(outfile, os.O_RDWR, 0) + if err != nil { + t.Fatalf("Error 
opening test image: %v", err) + } + defer f.Close() + fs, err := Read(f, 100*MB, 0, 512) + if err != nil { + t.Fatalf("Error reading filesystem: %v", err) + } + err = fs.Rm(tt.path) + switch { + case err != nil && tt.err == nil: + t.Fatalf("unexpected error removing file: %v", err) + case err == nil && tt.err != nil: + t.Fatalf("missing expected error removing file: %v", tt.err) + case err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error()): + t.Fatalf("mismatched error removing file, expected '%v' got '%v'", tt.err, err) + case err == nil: + // make sure the file no longer exists + _, err := fs.OpenFile(tt.path, 0) + if err == nil { + t.Fatalf("expected error opening file after removal") + } + } + }) + } +} + +func TestTruncateFile(t *testing.T) { + tests := []struct { + name string + path string + exists bool // if the path is supposed to exist before or not + err error + }{ + {"invalid path", "/do/not/exist/any/where", false, errors.New("could not read directory entries")}, + {"root dir", "/", true, errors.New("cannot truncate directory")}, + {"sub dir", "/foo", true, errors.New("cannot truncate directory")}, + {"valid file", "/random.dat", true, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outfile := testCreateImgCopy(t) + f, err := os.OpenFile(outfile, os.O_RDWR, 0) + if err != nil { + t.Fatalf("Error opening test image: %v", err) + } + defer f.Close() + fs, err := Read(f, 100*MB, 0, 512) + if err != nil { + t.Fatalf("Error reading filesystem: %v", err) + } + // get the original size of the file + var origSize int64 + if tt.exists { + fi, err := fs.Stat(tt.path) + if err != nil { + t.Fatalf("Error getting file info before truncate: %v", err) + } + origSize = fi.Size() + } + + // truncate the file to a random number of bytes + targetSize := int64(1000) + if origSize == targetSize { + targetSize = 2000 + } + err = fs.Truncate(tt.path, targetSize) + switch { + case err != nil && tt.err == nil: + 
t.Fatalf("unexpected error truncating file: %v", err) + case err == nil && tt.err != nil: + t.Fatalf("missing expected error truncating file: %v", tt.err) + case err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error()): + t.Fatalf("mismatched error truncating file, expected '%v' got '%v'", tt.err, err) + case err == nil: + // make sure the file size is now the target size + fi, err := fs.Stat(tt.path) + if err != nil { + t.Fatalf("Error getting file info after truncate: %v", err) + } + if fi.Size() != targetSize { + t.Errorf("expected file size to be %d, got %d", targetSize, fi.Size()) + } + } + }) + } +} + +func TestMkdir(t *testing.T) { + tests := []struct { + name string + path string + err error + }{ + {"parent exists", "/foo/bar", nil}, + {"parent does not exist", "/baz/bar", nil}, + {"parent is file", "/random.dat/bar", errors.New("cannot create directory at")}, + {"path exists", "/foo", nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outfile := testCreateImgCopy(t) + f, err := os.OpenFile(outfile, os.O_RDWR, 0) + if err != nil { + t.Fatalf("Error opening test image: %v", err) + } + defer f.Close() + fs, err := Read(f, 100*MB, 0, 512) + if err != nil { + t.Fatalf("Error reading filesystem: %v", err) + } + err = fs.Mkdir(tt.path) + switch { + case err != nil && tt.err == nil: + t.Fatalf("unexpected error creating directory: %v", err) + case err == nil && tt.err != nil: + t.Fatalf("missing expected error creating directory: %v", tt.err) + case err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error()): + t.Fatalf("mismatched error creating directory, expected '%v' got '%v'", tt.err, err) + case err == nil: + // make sure the directory exists + entries, err := fs.ReadDir(tt.path) + if err != nil { + t.Fatalf("Error reading directory: %v", err) + } + if len(entries) < 2 { + t.Fatalf("expected at least 2 entries in directory, for . and .. 
, got %d", len(entries)) + } + if entries[0].Name() != "." { + t.Errorf("expected . entry in directory") + } + if entries[1].Name() != ".." { + t.Errorf("expected .. entry in directory") + } + if !entries[0].IsDir() { + t.Errorf("expected . entry to be a directory") + } + if !entries[1].IsDir() { + t.Errorf("expected .. entry to be a directory") + } + } + }) + } +} diff --git a/filesystem/ext4/extent.go b/filesystem/ext4/extent.go index 33d8cff..e5d456e 100644 --- a/filesystem/ext4/extent.go +++ b/filesystem/ext4/extent.go @@ -3,6 +3,7 @@ package ext4 import ( "encoding/binary" "fmt" + "sort" ) const ( @@ -38,10 +39,10 @@ func (e *extent) equal(a *extent) bool { return *e == *a } -// blocks how many blocks are covered in the extents +// blockCount how many blocks are covered in the extents // //nolint:unused // useful function for future -func (e extents) blocks() uint64 { +func (e extents) blockCount() uint64 { var count uint64 for _, ext := range e { count += uint64(ext.count) @@ -62,6 +63,11 @@ type extentBlockFinder interface { blocks(fs *FileSystem) (extents, error) // toBytes convert this extentBlockFinder to bytes to be stored in a block or inode toBytes() []byte + getDepth() uint16 + getMax() uint16 + getBlockSize() uint32 + getFileBlock() uint32 + getCount() uint32 } var ( @@ -139,7 +145,7 @@ func (e extentLeafNode) findBlocks(start, count uint64, _ *FileSystem) ([]uint64 // blocks find the actual blocks for a range in the file. leaf nodes already have all of the data inside, // so the FileSystem reference is unused. 
func (e extentLeafNode) blocks(_ *FileSystem) (extents, error) { - return e.extents[:], nil + return e.extents, nil } // toBytes convert the node to raw bytes to be stored, either in a block or in an inode @@ -160,6 +166,26 @@ func (e extentLeafNode) toBytes() []byte { return b } +func (e *extentLeafNode) getDepth() uint16 { + return e.depth +} + +func (e *extentLeafNode) getMax() uint16 { + return e.max +} + +func (e *extentLeafNode) getBlockSize() uint32 { + return e.blockSize +} + +func (e *extentLeafNode) getFileBlock() uint32 { + return e.extents[0].fileBlock +} + +func (e *extentLeafNode) getCount() uint32 { + return uint32(len(e.extents)) +} + // extentInternalNode represents an internal node in a tree of extents // it includes the information in the header and the internal nodes // By definition, this is an internal node, so depth>0 @@ -251,6 +277,25 @@ func (e extentInternalNode) toBytes() []byte { } return b } +func (e *extentInternalNode) getDepth() uint16 { + return e.depth +} + +func (e *extentInternalNode) getMax() uint16 { + return e.max +} + +func (e *extentInternalNode) getBlockSize() uint32 { + return e.blockSize +} + +func (e *extentInternalNode) getFileBlock() uint32 { + return e.children[0].fileBlock +} + +func (e *extentInternalNode) getCount() uint32 { + return uint32(len(e.children)) +} // parseExtents takes bytes, parses them to find the actual extents or the next blocks down. 
// It does not recurse down the tree, as we do not want to do that until we actually are ready @@ -278,7 +323,9 @@ func parseExtents(b []byte, blocksize, start, count uint32) (extentBlockFinder, // we have parsed the header, now read either the leaf entries or the intermediate nodes switch e.depth { case 0: - var leafNode extentLeafNode + leafNode := extentLeafNode{ + extentNodeHeader: e, + } // read the leaves for i := 0; i < int(e.entries); i++ { start := i*extentTreeEntryLength + extentTreeHeaderLength @@ -291,11 +338,11 @@ func parseExtents(b []byte, blocksize, start, count uint32) (extentBlockFinder, startingBlock: binary.LittleEndian.Uint64(diskBlock), }) } - ret = leafNode + ret = &leafNode default: - var ( - internalNode extentInternalNode - ) + internalNode := extentInternalNode{ + extentNodeHeader: e, + } for i := 0; i < int(e.entries); i++ { start := i*extentTreeEntryLength + extentTreeHeaderLength diskBlock := make([]byte, 8) @@ -313,8 +360,374 @@ func parseExtents(b []byte, blocksize, start, count uint32) (extentBlockFinder, if len(internalNode.children) > 0 { internalNode.children[len(internalNode.children)-1].count = start + count - internalNode.children[len(internalNode.children)-1].fileBlock } - ret = internalNode + ret = &internalNode } return ret, nil } + +// extendExtentTree extends extent tree with a slice of new extents +// if the existing tree is nil, create a new one. +// For example, if the input is an extent tree - like the kind found in an inode - and you want to add more extents to it, +// you add the provided extents, and it expands the tree, including creating new internal nodes and writing them to disk, as needed. 
+ +func extendExtentTree(existing extentBlockFinder, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Check if existing is a leaf or internal node + switch node := existing.(type) { + case *extentLeafNode: + return extendLeafNode(node, added, fs, parent) + case *extentInternalNode: + return extendInternalNode(node, added, fs, parent) + case nil: + // brand new extent tree. The root is in the inode, which has a max of 4 extents. + return createRootExtentTree(added, fs) + default: + return nil, fmt.Errorf("unsupported extentBlockFinder type") + } +} + +func createRootExtentTree(added *extents, fs *FileSystem) (extentBlockFinder, error) { + // the root always is in the inode, which has a maximum of 4 extents. If it fits within that, we can just create a leaf node. + if len(*added) <= 4 { + return &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(len(*added)), + max: 4, + blockSize: fs.superblock.blockSize, + }, + extents: *added, + }, nil + } + // in theory, we never should be creating a root internal node. We always should be starting with an extent or two, + // and later expanding the file. + // It might be theoretically possible, though, so we will handle it in the future. + return nil, fmt.Errorf("cannot create root internal node") +} + +func extendLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Check if the leaf node has enough space for the added extents + if len(node.extents)+len(*added) <= int(node.max) { + // Simply append the extents if there's enough space + node.extents = append(node.extents, *added...) 
+ node.entries = uint16(len(node.extents)) + + // Write the updated node back to the disk + err := writeNodeToDisk(node, fs, parent) + if err != nil { + return nil, err + } + + return node, nil + } + + // If not enough space, split the node + newNodes, err := splitLeafNode(node, added, fs, parent) + if err != nil { + return nil, err + } + + // Check if the original node was the root + if parent == nil { + // Create a new internal node to reference the split leaf nodes + var newNodesAsBlockFinder []extentBlockFinder + for _, n := range newNodes { + newNodesAsBlockFinder = append(newNodesAsBlockFinder, n) + } + newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs) + return newRoot, nil + } + + // If the original node was not the root, handle the parent internal node + parentNode, err := getParentNode(node, fs) + if err != nil { + return nil, err + } + + return extendInternalNode(parentNode, added, fs, parent) +} + +func splitLeafNode(node *extentLeafNode, added *extents, fs *FileSystem, parent *extentInternalNode) ([]*extentLeafNode, error) { + // Combine existing and new extents + allExtents := node.extents + allExtents = append(allExtents, *added...) 
+ // Sort extents by fileBlock to maintain order + sort.Slice(allExtents, func(i, j int) bool { + return allExtents[i].fileBlock < allExtents[j].fileBlock + }) + + // Calculate the midpoint to split the extents + mid := len(allExtents) / 2 + + // Create the first new leaf node + firstLeaf := &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(mid), + max: node.max, + blockSize: node.blockSize, + }, + extents: allExtents[:mid], + } + + // Create the second new leaf node + secondLeaf := &extentLeafNode{ + extentNodeHeader: extentNodeHeader{ + depth: 0, + entries: uint16(len(allExtents) - mid), + max: node.max, + blockSize: node.blockSize, + }, + extents: allExtents[mid:], + } + + // Write new leaf nodes to the disk + err := writeNodeToDisk(firstLeaf, fs, parent) + if err != nil { + return nil, err + } + err = writeNodeToDisk(secondLeaf, fs, parent) + if err != nil { + return nil, err + } + + return []*extentLeafNode{firstLeaf, secondLeaf}, nil +} + +func createInternalNode(nodes []extentBlockFinder, parent *extentInternalNode, fs *FileSystem) *extentInternalNode { + internalNode := &extentInternalNode{ + extentNodeHeader: extentNodeHeader{ + depth: nodes[0].getDepth() + 1, // Depth is 1 more than the children + entries: uint16(len(nodes)), + max: nodes[0].getMax(), // Assuming uniform max for all nodes + blockSize: nodes[0].getBlockSize(), + }, + children: make([]*extentChildPtr, len(nodes)), + } + + for i, node := range nodes { + internalNode.children[i] = &extentChildPtr{ + fileBlock: node.getFileBlock(), + count: node.getCount(), + diskBlock: getBlockNumberFromNode(node, parent), + } + } + + // Write the new internal node to the disk + err := writeNodeToDisk(internalNode, fs, parent) + if err != nil { + return nil + } + + return internalNode +} + +func getBlockNumberFromNode(node extentBlockFinder, parent *extentInternalNode) uint64 { + for _, childPtr := range parent.children { + if childPtrMatchesNode(childPtr, node) { + return 
childPtr.diskBlock + } + } + return 0 // Return 0 or an appropriate error value if the block number is not found +} + +// Helper function to match a child pointer to a node +func childPtrMatchesNode(childPtr *extentChildPtr, node extentBlockFinder) bool { + switch n := node.(type) { + case *extentLeafNode: + return childPtr.fileBlock == n.extents[0].fileBlock + case *extentInternalNode: + // Logic to determine if the childPtr matches the internal node + // Placeholder: Implement based on your specific matching criteria + return true + default: + return false + } +} + +func extendInternalNode(node *extentInternalNode, added *extents, fs *FileSystem, parent *extentInternalNode) (extentBlockFinder, error) { + // Find the appropriate child node to extend + childIndex := findChildNode(node, added) + childPtr := node.children[childIndex] + + // Load the actual child node from the disk + childNode, err := loadChildNode(childPtr, fs) + if err != nil { + return nil, err + } + + // Recursively extend the child node + updatedChild, err := extendExtentTree(childNode, added, fs, node) + if err != nil { + return nil, err + } + + // Update the current internal node to reference the updated child + switch updatedChild := updatedChild.(type) { + case *extentLeafNode: + node.children[childIndex] = &extentChildPtr{ + fileBlock: updatedChild.extents[0].fileBlock, + count: uint32(len(updatedChild.extents)), + diskBlock: getBlockNumberFromNode(updatedChild, node), + } + case *extentInternalNode: + node.children[childIndex] = &extentChildPtr{ + fileBlock: updatedChild.children[0].fileBlock, + count: uint32(len(updatedChild.children)), + diskBlock: getBlockNumberFromNode(updatedChild, node), + } + default: + return nil, fmt.Errorf("unsupported updatedChild type") + } + + // Check if the internal node is at capacity + if len(node.children) > int(node.max) { + // Split the internal node if it's at capacity + newInternalNodes, err := splitInternalNode(node, node.children[childIndex], fs, 
parent) + if err != nil { + return nil, err + } + + // Check if the original node was the root + if parent == nil { + // Create a new internal node as the new root + var newNodesAsBlockFinder []extentBlockFinder + for _, n := range newInternalNodes { + newNodesAsBlockFinder = append(newNodesAsBlockFinder, n) + } + newRoot := createInternalNode(newNodesAsBlockFinder, nil, fs) + return newRoot, nil + } + + // If the original node was not the root, handle the parent internal node + return extendInternalNode(parent, added, fs, parent) + } + + // Write the updated node back to the disk + err = writeNodeToDisk(node, fs, parent) + if err != nil { + return nil, err + } + + return node, nil +} + +// Helper function to get the parent node of a given internal node +// +//nolint:revive // this parameter will be used eventually +func getParentNode(node extentBlockFinder, fs *FileSystem) (*extentInternalNode, error) { + // Logic to find and return the parent node of the given node + // This is a placeholder and needs to be implemented based on your specific tree structure + return nil, fmt.Errorf("getParentNode not implemented") +} + +func splitInternalNode(node *extentInternalNode, newChild *extentChildPtr, fs *FileSystem, parent *extentInternalNode) ([]*extentInternalNode, error) { + // Combine existing children with the new child + allChildren := node.children + allChildren = append(allChildren, newChild) + // Sort children by fileBlock to maintain order + sort.Slice(allChildren, func(i, j int) bool { + return allChildren[i].fileBlock < allChildren[j].fileBlock + }) + + // Calculate the midpoint to split the children + mid := len(allChildren) / 2 + + // Create the first new internal node + firstInternal := &extentInternalNode{ + extentNodeHeader: extentNodeHeader{ + depth: node.depth, + entries: uint16(mid), + max: node.max, + blockSize: node.blockSize, + }, + children: allChildren[:mid], + } + + // Create the second new internal node + secondInternal := &extentInternalNode{ 
+ extentNodeHeader: extentNodeHeader{ + depth: node.depth, + entries: uint16(len(allChildren) - mid), + max: node.max, + blockSize: node.blockSize, + }, + children: allChildren[mid:], + } + + // Write new internal nodes to the disk + err := writeNodeToDisk(firstInternal, fs, parent) + if err != nil { + return nil, err + } + err = writeNodeToDisk(secondInternal, fs, parent) + if err != nil { + return nil, err + } + + return []*extentInternalNode{firstInternal, secondInternal}, nil +} + +func writeNodeToDisk(node extentBlockFinder, fs *FileSystem, parent *extentInternalNode) error { + var blockNumber uint64 + if parent != nil { + blockNumber = getBlockNumberFromNode(node, parent) + } else { + blockNumber = getNewBlockNumber(fs) + } + + if blockNumber == 0 { + return fmt.Errorf("block number not found for node") + } + + data := node.toBytes() + _, err := fs.file.WriteAt(data, int64(blockNumber)*int64(fs.superblock.blockSize)) + return err +} + +// Helper function to get a new block number when there is no parent +// +//nolint:revive // this parameter will be used eventually +func getNewBlockNumber(fs *FileSystem) uint64 { + // Logic to allocate a new block + // This is a placeholder and needs to be implemented based on your specific filesystem structure + return 0 // Placeholder: Replace with actual implementation +} + +// Helper function to find the block number of a child node from its parent +func findChildBlockNumber(parent *extentInternalNode, child extentBlockFinder) uint64 { + for _, childPtr := range parent.children { + if childPtrMatchesNode(childPtr, child) { + return childPtr.diskBlock + } + } + return 0 +} + +func findChildNode(node *extentInternalNode, added *extents) int { + // Assuming added extents are sorted, find the correct child node to extend + addedSlice := *added + for i, child := range node.children { + if addedSlice[0].fileBlock < child.fileBlock { + return i - 1 + } + } + return len(node.children) - 1 +} + +// loadChildNode load up a child 
node from the disk +// +//nolint:unparam // this parameter will be used eventually +func loadChildNode(childPtr *extentChildPtr, fs *FileSystem) (extentBlockFinder, error) { + data := make([]byte, fs.superblock.blockSize) + _, err := fs.file.ReadAt(data, int64(childPtr.diskBlock)*int64(fs.superblock.blockSize)) + if err != nil { + return nil, err + } + + // Logic to decode data into an extentBlockFinder (extentLeafNode or extentInternalNode) + // This is a placeholder and needs to be implemented based on your specific encoding scheme + var node extentBlockFinder + // Implement the logic to decode the node from the data + return node, nil +} diff --git a/filesystem/ext4/file.go b/filesystem/ext4/file.go index 337dcba..4dc6539 100644 --- a/filesystem/ext4/file.go +++ b/filesystem/ext4/file.go @@ -1,7 +1,6 @@ package ext4 import ( - "errors" "fmt" "io" ) @@ -87,10 +86,101 @@ func (fl *File) Read(b []byte) (int, error) { // returns a non-nil error when n != len(b) // writes to the last known offset in the file from last read or write // use Seek() to set at a particular point -// -//nolint:revive // params not used because still read-only, will be used in the future when read-write -func (fl *File) Write(p []byte) (int, error) { - return 0, errors.New("not implemented") +func (fl *File) Write(b []byte) (int, error) { + var ( + fileSize = int64(fl.size) + originalFileSize = int64(fl.size) + blockCount = fl.blocks + originalBlockCount = fl.blocks + blocksize = uint64(fl.filesystem.superblock.blockSize) + ) + if !fl.isReadWrite { + return 0, fmt.Errorf("file is not open for writing") + } + + // if adding these bytes goes past the filesize, update the inode filesize to the new size and write the inode + // if adding these bytes goes past the total number of blocks, add more blocks, update the inode block count and write the inode + // if the offset is greater than the filesize, update the inode filesize to the offset + if fl.offset >= fileSize { + fl.size = 
uint64(fl.offset) + } + + // Calculate the number of bytes to write + bytesToWrite := int64(len(b)) + + offsetAfterWrite := fl.offset + bytesToWrite + if offsetAfterWrite > int64(fl.size) { + fl.size = uint64(fl.offset + bytesToWrite) + } + + // calculate the number of blocks in the file post-write + newBlockCount := fl.size / blocksize + if fl.size%blocksize > 0 { + newBlockCount++ + } + blocksNeeded := newBlockCount - blockCount + bytesNeeded := blocksNeeded * blocksize + if newBlockCount > blockCount { + newExtents, err := fl.filesystem.allocateExtents(bytesNeeded, &fl.extents) + if err != nil { + return 0, fmt.Errorf("could not allocate disk space for file %w", err) + } + extentTreeParsed, err := extendExtentTree(fl.inode.extents, newExtents, fl.filesystem, nil) + if err != nil { + return 0, fmt.Errorf("could not convert extents into tree: %w", err) + } + fl.inode.extents = extentTreeParsed + fl.blocks = newBlockCount + } + + if originalFileSize != int64(fl.size) || originalBlockCount != fl.blocks { + err := fl.filesystem.writeInode(fl.inode) + if err != nil { + return 0, fmt.Errorf("could not write inode: %w", err) + } + } + + writtenBytes := int64(0) + + // the offset given for reading is relative to the file, so we need to calculate + // where these are in the extents relative to the file + writeStartBlock := uint64(fl.offset) / blocksize + for _, e := range fl.extents { + // if the last block of the extent is before the first block we want to write, skip it + if uint64(e.fileBlock)+uint64(e.count) < writeStartBlock { + continue + } + // extentSize is the number of bytes on the disk for the extent + extentSize := int64(e.count) * int64(blocksize) + // where do we start and end in the extent? + startPositionInExtent := fl.offset - int64(e.fileBlock)*int64(blocksize) + leftInExtent := extentSize - startPositionInExtent + // how many bytes are left in the extent? 
+ toWriteInOffset := bytesToWrite - writtenBytes + if toWriteInOffset > leftInExtent { + toWriteInOffset = leftInExtent + } + // read those bytes + startPosOnDisk := e.startingBlock*blocksize + uint64(startPositionInExtent) + b2 := make([]byte, toWriteInOffset) + copy(b2, b[writtenBytes:]) + written, err := fl.filesystem.file.WriteAt(b2, int64(startPosOnDisk)) + if err != nil { + return int(writtenBytes), fmt.Errorf("failed to read bytes: %v", err) + } + writtenBytes += int64(written) + fl.offset += int64(written) + + if written >= len(b) { + break + } + } + var err error + if fl.offset >= fileSize { + err = io.EOF + } + + return int(writtenBytes), err } // Seek set the offset to a particular point in the file diff --git a/filesystem/ext4/fileinfo.go b/filesystem/ext4/fileinfo.go index 4449c28..4a6e5c3 100644 --- a/filesystem/ext4/fileinfo.go +++ b/filesystem/ext4/fileinfo.go @@ -16,33 +16,33 @@ type FileInfo struct { } // IsDir abbreviation for Mode().IsDir() -func (fi FileInfo) IsDir() bool { +func (fi *FileInfo) IsDir() bool { return fi.isDir } // ModTime modification time -func (fi FileInfo) ModTime() time.Time { +func (fi *FileInfo) ModTime() time.Time { return fi.modTime } // Mode returns file mode -func (fi FileInfo) Mode() os.FileMode { +func (fi *FileInfo) Mode() os.FileMode { return fi.mode } // Name base name of the file // // will return the long name of the file. 
If none exists, returns the shortname and extension
-func (fi FileInfo) Name() string {
+func (fi *FileInfo) Name() string {
 	return fi.name
 }
 
 // Size length in bytes for regular files
-func (fi FileInfo) Size() int64 {
+func (fi *FileInfo) Size() int64 {
 	return fi.size
 }
 
 // Sys underlying data source - not supported yet and so will return nil
-func (fi FileInfo) Sys() interface{} {
+func (fi *FileInfo) Sys() interface{} {
 	return nil
 }
diff --git a/filesystem/ext4/groupdescriptors.go b/filesystem/ext4/groupdescriptors.go
index b289902..995cda0 100644
--- a/filesystem/ext4/groupdescriptors.go
+++ b/filesystem/ext4/groupdescriptors.go
@@ -1,8 +1,10 @@
 package ext4
 
 import (
+	"cmp"
 	"encoding/binary"
 	"fmt"
+	"slices"
 
 	"github.com/diskfs/go-diskfs/filesystem/ext4/crc"
 )
@@ -116,6 +118,21 @@ func (gds *groupDescriptors) toBytes(checksumType gdtChecksumType, hashSeed uint
 	return b
 }
 
+// byFreeBlocks provides a sorted list of groupDescriptors by free blocks, ascending.
+// If you want them descending, reverse it. 
+func (gds *groupDescriptors) byFreeBlocks() []groupDescriptor { + // make a copy of the slice + gdSlice := make([]groupDescriptor, len(gds.descriptors)) + copy(gdSlice, gds.descriptors) + + // sort the slice + slices.SortFunc(gdSlice, func(a, b groupDescriptor) int { + return cmp.Compare(a.freeBlocks, b.freeBlocks) + }) + + return gdSlice +} + // groupDescriptorFromBytes create a groupDescriptor struct from bytes func groupDescriptorFromBytes(b []byte, gdSize uint16, number int, checksumType gdtChecksumType, hashSeed uint32) (*groupDescriptor, error) { // block count, reserved block count and free blocks depends on whether the fs is 64-bit or not diff --git a/filesystem/ext4/inode.go b/filesystem/ext4/inode.go index 01a077b..b760c0c 100644 --- a/filesystem/ext4/inode.go +++ b/filesystem/ext4/inode.go @@ -255,7 +255,7 @@ func inodeFromBytes(b []byte, sb *superblock, number uint32) (*inode, error) { } else { // parse the extent information in the inode to get the root of the extents tree // we do not walk the entire tree, to get a slice of blocks for the file. - // If we want to do that, we call the extentBlockFinder.toBlocks() method + // If we want to do that, we call the extentBlockFinder.blocks() method allExtents, err = parseExtents(extentInfo, sb.blockSize, 0, uint32(blocks)) if err != nil { return nil, fmt.Errorf("error parsing extent tree: %v", err) diff --git a/filesystem/ext4/superblock.go b/filesystem/ext4/superblock.go index f6a4013..fcafda9 100644 --- a/filesystem/ext4/superblock.go +++ b/filesystem/ext4/superblock.go @@ -700,7 +700,12 @@ func (sb *superblock) gdtChecksumType() gdtChecksumType { } func (sb *superblock) blockGroupCount() uint64 { - return sb.blockCount / uint64(sb.blocksPerGroup) + whole := sb.blockCount / uint64(sb.blocksPerGroup) + part := sb.blockCount % uint64(sb.blocksPerGroup) + if part > 0 { + whole++ + } + return whole } // calculateBackupSuperblocks calculate which block groups should have backup superblocks. 
diff --git a/filesystem/ext4/testdata/buildimg.sh b/filesystem/ext4/testdata/buildimg.sh
index 542b280..b1e3306 100755
--- a/filesystem/ext4/testdata/buildimg.sh
+++ b/filesystem/ext4/testdata/buildimg.sh
@@ -16,6 +16,8 @@ dd if=/dev/zero of=two-k-file.dat bs=1024 count=2
 dd if=/dev/zero of=six-k-file.dat bs=1024 count=6
 dd if=/dev/zero of=seven-k-file.dat bs=1024 count=7
 dd if=/dev/zero of=ten-meg-file.dat bs=1M count=10
+echo "This is a subdir file" > foo/subdirfile.txt
+# `set +x` and then `set -x` because otherwise the logs are overloaded with creating 10000 directories
 set +x
 i=0; until [ $i -gt 10000 ]; do mkdir foo/dir${i}; i=$(( $i+1 )); done
 set -x
diff --git a/util/bitmap.go b/util/bitmap.go
new file mode 100644
index 0000000..6fb85a1
--- /dev/null
+++ b/util/bitmap.go
@@ -0,0 +1,171 @@
+package util
+
+import "fmt"
+
+// Bitmap is a structure holding a bitmap
+type Bitmap struct {
+	bits []byte
+}
+
+// Contiguous holds a position and count of contiguous bits, either free or set
+type Contiguous struct {
+	Position int
+	Count    int
+}
+
+// BitmapFromBytes creates a bitmap struct from bytes
+func BitmapFromBytes(b []byte) *Bitmap {
+	// just copy them over
+	bits := make([]byte, len(b))
+	copy(bits, b)
+	bm := Bitmap{
+		bits: bits,
+	}
+
+	return &bm
+}
+
+// NewBitmap creates a new bitmap of size bytes; it is not in bits to force the caller to have
+// a complete set
+func NewBitmap(bytes int) *Bitmap {
+	bm := Bitmap{
+		bits: make([]byte, bytes),
+	}
+	return &bm
+}
+
+// ToBytes returns raw bytes underlying the bitmap
+func (bm *Bitmap) ToBytes() []byte {
+	b := make([]byte, len(bm.bits))
+	copy(b, bm.bits)
+
+	return b
+}
+
+// FromBytes overwrites the existing map with the contents of the bytes.
+// It is the equivalent of BitmapFromBytes, but uses an existing Bitmap. 
+func (bm *Bitmap) FromBytes(b []byte) {
+	bm.bits = make([]byte, len(b))
+	copy(bm.bits, b)
+}
+
+// IsSet checks if a specific bit location is set
+func (bm *Bitmap) IsSet(location int) (bool, error) {
+	byteNumber, bitNumber := findBitForIndex(location)
+	if byteNumber > len(bm.bits) {
+		return false, fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+	}
+	mask := byte(0x1) << bitNumber
+	return bm.bits[byteNumber]&mask == mask, nil
+}
+
+// Clear a specific bit location
+func (bm *Bitmap) Clear(location int) error {
+	byteNumber, bitNumber := findBitForIndex(location)
+	if byteNumber > len(bm.bits) {
+		return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+	}
+	mask := byte(0x1) << bitNumber
+	mask = ^mask
+	bm.bits[byteNumber] &= mask
+	return nil
+}
+
+// Set a specific bit location
+func (bm *Bitmap) Set(location int) error {
+	byteNumber, bitNumber := findBitForIndex(location)
+	if byteNumber > len(bm.bits) {
+		return fmt.Errorf("location %d is not in %d size bitmap", location, len(bm.bits)*8)
+	}
+	mask := byte(0x1) << bitNumber
+	bm.bits[byteNumber] |= mask
+	return nil
+}
+
+// FirstFree returns the first free bit in the bitmap
+// Begins at start, so if you want to find the first free bit, pass start=0.
+// Returns -1 if none found. 
+func (bm *Bitmap) FirstFree(start int) int {
+	var location = -1
+	candidates := bm.bits[start/8:]
+	for i, b := range candidates {
+		// if all used, continue to next byte
+		if b&0xff == 0xff {
+			continue
+		}
+		// not all used, so find first bit set to 0
+		for j := uint8(0); j < 8; j++ {
+			mask := byte(0x1) << j
+			if b&mask != mask {
+				location = 8*i + int(j)
+				break
+			}
+		}
+		break
+	}
+	return location
+}
+
+// FirstSet returns location of first set bit in the bitmap
+func (bm *Bitmap) FirstSet() int {
+	var location = -1
+	for i, b := range bm.bits {
+		// if all free, continue to next
+		if b == 0x00 {
+			continue
+		}
+		// not all free, so find first bit set to 1
+		for j := uint8(0); j < 8; j++ {
+			mask := byte(0x1) << j
+			mask = ^mask
+			if b|mask != mask {
+				location = 8*i + (8 - int(j))
+				break
+			}
+		}
+		break
+	}
+	return location
+}
+
+// FreeList returns a slice of contiguous free locations by location.
+// It is sorted by location. If you want to sort it by size, use sort.Slice
+// for example, if the bitmap is 10010010 00100000 10000010, it will return
+//
+//	1: 2, // 2 free bits at position 1
+//	4: 2, // 2 free bits at position 4
+//	8: 3, // 3 free bits at position 8
+//	11: 5 // 5 free bits at position 11
+//	17: 5 // 5 free bits at position 17
+//	23: 1, // 1 free bit at position 23
+//
+// if you want it in reverse order, just reverse the slice.
+func (bm *Bitmap) FreeList() []Contiguous {
+	var list []Contiguous
+	var location = -1
+	var count = 0
+	for i, b := range bm.bits {
+		for j := uint8(0); j < 8; j++ {
+			mask := byte(0x1) << j
+			switch {
+			case b&mask != mask:
+				if location == -1 {
+					location = 8*i + int(j)
+				}
+				count++
+			case location != -1:
+				list = append(list, Contiguous{location, count})
+				location = -1
+				count = 0
+			}
+		}
+	}
+	if location != -1 {
+		list = append(list, Contiguous{location, count})
+	}
+	return list
+}
+
+func findBitForIndex(index int) (byteNumber int, bitNumber uint8) {
+	return index / 8, uint8(index % 8)
+}