diff --git a/sortix/Makefile b/sortix/Makefile
index cd704bd8..c285f3cc 100644
--- a/sortix/Makefile
+++ b/sortix/Makefile
@@ -83,6 +83,7 @@ descriptor.o \
dispmsg.o \
dtable.o \
elf.o \
+fcache.o \
fsfunc.o \
fs/kram.o \
fs/user.o \
diff --git a/sortix/fcache.cpp b/sortix/fcache.cpp
new file mode 100644
index 00000000..330b5e8f
--- /dev/null
+++ b/sortix/fcache.cpp
@@ -0,0 +1,473 @@
+/*******************************************************************************
+
+ Copyright(C) Jonas 'Sortie' Termansen 2013.
+
+ This file is part of Sortix.
+
+ Sortix is free software: you can redistribute it and/or modify it under the
+ terms of the GNU General Public License as published by the Free Software
+ Foundation, either version 3 of the License, or (at your option) any later
+ version.
+
+ Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ details.
+
+ You should have received a copy of the GNU General Public License along with
+ Sortix. If not, see <http://www.gnu.org/licenses/>.
+
+ fcache.cpp
+ Cache mechanism for file contents.
+
+*******************************************************************************/
+
+#include <sys/types.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sortix/mman.h>
+#include <sortix/seek.h>
+
+#include <sortix/kernel/addralloc.h>
+#include <sortix/kernel/fcache.h>
+#include <sortix/kernel/ioctx.h>
+#include <sortix/kernel/kthread.h>
+#include <sortix/kernel/memorymanagement.h>
+#include <sortix/kernel/panic.h>
+
+namespace Sortix {
+
+// Compute the global id of block `block' inside area `area', where every
+// area holds `blocks_per_area' blocks: the index the block would have if all
+// areas were laid out back to back.
+__attribute__((unused))
+static uintptr_t MakeBlockId(size_t area, size_t blocks_per_area, size_t block)
+{
+	return (uintptr_t) area * (uintptr_t) blocks_per_area + (uintptr_t) block;
+}
+
+// Pack a block id and its BCACHE_* flag bits into one information word. The
+// id lives in the bits above the low 12 flag bits (12 matching a 4 KiB page
+// size), so the two fields must never overlap.
+__attribute__((unused))
+static uintptr_t MakeBlockInformation(uintptr_t blockid, uintptr_t flags)
+{
+	// Mask exactly the low 12 bits. The previous mask, (1 << (12+1)) - 1,
+	// was 13 bits wide, letting a stray flag bit corrupt bit 0 of the id.
+	return (blockid << 12) | (flags & ((1 << 12) - 1));
+}
+
+// Extract the BCACHE_* flag bits from an information word.
+__attribute__((unused))
+static uintptr_t FlagsOfBlockInformation(uintptr_t info)
+{
+	// The flags occupy exactly the low 12 bits (the block id starts at bit
+	// 12). The previous 13-bit mask, (1 << (12+1)) - 1, wrongly included
+	// the lowest bit of the block id in the returned flags.
+	return info & ((1 << 12) - 1);
+}
+
+// Extract the block id from an information word; the id is stored above the
+// low 12 flag bits.
+__attribute__((unused))
+static uintptr_t BlockIdOfBlockInformation(uintptr_t info)
+{
+	return info >> 12;
+}
+
+// Recover the block's index within its area from a global block id.
+__attribute__((unused))
+static size_t BlockOfBlockId(uintptr_t blockid, size_t blocks_per_area)
+{
+	return (size_t) (blockid % blocks_per_area);
+}
+
+// Recover the index of the area a global block id belongs to.
+__attribute__((unused))
+static size_t AreaOfBlockId(uintptr_t blockid, size_t blocks_per_area)
+{
+	return (size_t) (blockid / blocks_per_area);
+}
+
+// Construct an empty block cache. No memory is reserved up front; areas are
+// added lazily by AddArea when AcquireBlock runs out of unused blocks.
+BlockCache::BlockCache()
+{
+	areas = NULL;
+	areas_used = 0;
+	areas_length = 0;
+	// Each area provides 1024 page-sized blocks (4 MiB with 4 KiB pages).
+	blocks_per_area = 1024UL;
+	mru_block = NULL;
+	lru_block = NULL;
+	unused_block = NULL;
+	bcache_mutex = KTHREAD_MUTEX_INITIALIZER;
+}
+
+// Destroy the block cache. Currently leaks the areas, block descriptors and
+// mapped memory; acceptable only because the kernel cache lives forever.
+BlockCache::~BlockCache()
+{
+	// TODO: Clean up everything here!
+}
+
+// Hand out an unused cache block, growing the cache by a whole new area if
+// the unused list is empty. The block is flagged used and linked at the MRU
+// position. Returns NULL if no memory was available.
+BlockCacheBlock* BlockCache::AcquireBlock()
+{
+	ScopedLock lock(&bcache_mutex);
+	if ( !unused_block && !AddArea() )
+		return NULL;
+	BlockCacheBlock* ret = unused_block;
+	assert(ret);
+	// Pop the block off the head of the unused list.
+	unused_block = unused_block->next_block;
+	ret->prev_block = ret->next_block = NULL;
+	if ( unused_block )
+		unused_block->prev_block = NULL;
+	ret->information |= BCACHE_USED;
+	LinkBlock(ret);
+	return ret;
+}
+
+// Return a block to the unused list so AcquireBlock can hand it out again.
+// The block is removed from the MRU/LRU chain and its state is reset so a
+// recycled block does not leak stale flags or ownership to its next user.
+void BlockCache::ReleaseBlock(BlockCacheBlock* block)
+{
+	ScopedLock lock(&bcache_mutex);
+	UnlinkBlock(block);
+	if ( unused_block )
+		unused_block->prev_block = block;
+	block->next_block = unused_block;
+	block->prev_block = NULL;
+	// Clear the modified bit and the owner along with the used bit;
+	// previously only BCACHE_USED was cleared, so a recycled block could
+	// still appear dirty and point at its old FileCache.
+	block->information &= ~(BCACHE_USED | BCACHE_MODIFIED);
+	block->fcache = NULL;
+	unused_block = block;
+}
+
+// Return a pointer to this block's page-sized data. The block id stored in
+// the information word encodes which area and which slot inside it to use.
+uint8_t* BlockCache::BlockData(BlockCacheBlock* block)
+{
+	ScopedLock lock(&bcache_mutex);
+	uintptr_t block_id = BlockIdOfBlockInformation(block->information);
+	uintptr_t area_num = AreaOfBlockId(block_id, blocks_per_area);
+	uintptr_t area_off = BlockOfBlockId(block_id, blocks_per_area);
+	return areas[area_num].data + area_off * Page::Size();
+}
+
+// Move the block to the most-recently-used position of the MRU/LRU chain.
+void BlockCache::MarkUsed(BlockCacheBlock* block)
+{
+	ScopedLock lock(&bcache_mutex);
+	UnlinkBlock(block);
+	LinkBlock(block);
+}
+
+// Move the block to the most-recently-used position and flag it as having
+// unwritten changes.
+void BlockCache::MarkModified(BlockCacheBlock* block)
+{
+	ScopedLock lock(&bcache_mutex);
+	UnlinkBlock(block);
+	LinkBlock(block);
+	block->information |= BCACHE_MODIFIED;
+}
+
+// Remove the block from the doubly-linked MRU/LRU chain. The conditional
+// lvalues rewrite either the neighbor's link or the list head (mru_block) /
+// tail (lru_block) when the block sits at an end. Caller holds bcache_mutex.
+void BlockCache::UnlinkBlock(BlockCacheBlock* block)
+{
+	(block->prev_block ? block->prev_block->next_block : mru_block) = block->next_block;
+	(block->next_block ? block->next_block->prev_block : lru_block) = block->prev_block;
+	block->next_block = block->prev_block = NULL;
+}
+
+// Insert the block at the most-recently-used head of the chain. The block
+// must not already be linked. Caller holds bcache_mutex.
+void BlockCache::LinkBlock(BlockCacheBlock* block)
+{
+	assert(!(block->next_block || block == lru_block));
+	assert(!(block->prev_block || block == mru_block));
+	block->prev_block = NULL;
+	block->next_block = mru_block;
+	if ( mru_block )
+		mru_block->prev_block = block;
+	mru_block = block;
+	// An empty chain means this block is also the least recently used.
+	if ( !lru_block )
+		lru_block = block;
+}
+
+// Map another area of blocks_per_area page-sized blocks and push every new
+// block onto the unused list. Called with bcache_mutex held when the unused
+// list is exhausted. Returns false on allocation failure, leaving the cache
+// unchanged.
+bool BlockCache::AddArea()
+{
+	// Grow the areas array geometrically when it is full.
+	if ( areas_used == areas_length )
+	{
+		size_t new_length = areas_length ? 2 * areas_length : 8UL;
+		size_t new_size = new_length * sizeof(BlockCacheArea);
+		BlockCacheArea* new_areas = (BlockCacheArea*) realloc(areas, new_size);
+		if ( !new_areas )
+			return false;
+		areas = new_areas;
+		areas_length = new_length;
+	}
+
+	size_t area_memory_size = Page::Size() * blocks_per_area;
+	int prot = PROT_KREAD | PROT_KWRITE;
+
+	// Acquire address space, block descriptors and backing memory in order;
+	// on failure, unwind whatever succeeded via the cleanup labels below.
+	BlockCacheArea* area = &areas[areas_used];
+	if ( !AllocateKernelAddress(&area->addralloc, area_memory_size) )
+		goto cleanup_done;
+	if ( !(area->blocks = new BlockCacheBlock[blocks_per_area]) )
+		goto cleanup_addralloc;
+	if ( !Memory::MapRange(area->addralloc.from, area->addralloc.size, prot) )
+		goto cleanup_blocks;
+
+	Memory::Flush();
+
+	// Add all our new blocks into the unused block linked list. Iterating
+	// backwards leaves block 0 at the head of the list.
+	for ( size_t i = blocks_per_area; i != 0; i-- )
+	{
+		size_t index = i - 1;
+		BlockCacheBlock* block = &area->blocks[index];
+		uintptr_t blockid = MakeBlockId(areas_used, blocks_per_area, index);
+		block->information = MakeBlockInformation(blockid, BCACHE_PRESENT);
+		block->fcache = NULL;
+		block->next_block = unused_block;
+		block->prev_block = NULL;
+		if ( unused_block )
+			unused_block->prev_block = block;
+		unused_block = block;
+	}
+
+	// Fill the new memory with a recognizable poison byte -- presumably to
+	// catch reads of never-initialized cache data; confirm intent.
+	area->data = (uint8_t*) area->addralloc.from;
+	for ( size_t i = 0; i < area_memory_size; i++ )
+		area->data[i] = 0xCC;
+	return areas_used++, true;
+
+cleanup_blocks:
+	delete[] area->blocks;
+cleanup_addralloc:
+	FreeKernelAddress(&area->addralloc);
+cleanup_done:
+	return false;
+}
+
+// The single block cache shared by every FileCache in the kernel.
+static BlockCache* kernel_block_cache = NULL;
+
+// Bring up the global block cache during boot. Panics on failure, since the
+// filesystem layer cannot function without it.
+/*static*/ void FileCache::Init()
+{
+	if ( !(kernel_block_cache = new BlockCache()) )
+		Panic("Unable to allocate kernel block cache");
+}
+
+// Construct an empty file cache. Requires FileCache::Init to have run so the
+// global block cache exists. file_written tracks how much of the file has
+// actually been initialized (written or zeroed); the rest is lazily zeroed.
+FileCache::FileCache(/*FileCacheBackend* backend*/)
+{
+	assert(kernel_block_cache);
+	file_size = 0;
+	file_written = 0;
+	blocks = NULL;
+	blocks_used = 0;
+	blocks_length = 0;
+	fcache_mutex = KTHREAD_MUTEX_INITIALIZER;
+	modified = false;
+	modified_size = false;
+}
+
+// Destroy the file cache, returning every held block to the global block
+// cache and freeing the block pointer array. Dirty data is discarded (there
+// is no backend to write to yet).
+FileCache::~FileCache()
+{
+	for ( size_t i = 0; i < blocks_used; i++ )
+		kernel_block_cache->ReleaseBlock(blocks[i]);
+	free(blocks);
+}
+
+// Flush cached state to the backing store. Returns 0 on success, -1 on
+// error. Currently always succeeds since Synchronize is a no-op.
+int FileCache::sync(ioctx_t* /*ctx*/)
+{
+	ScopedLock lock(&fcache_mutex);
+	return Synchronize() ? 0 : -1;
+}
+
+// Write modified blocks and size changes to the backend. No backend exists
+// yet, so this unconditionally reports success. Caller holds fcache_mutex.
+bool FileCache::Synchronize()
+{
+	//if ( !backend )
+	if ( true )
+		return true;
+	// Dead code until a backend is added: skip work when nothing is dirty.
+	if ( !(modified || modified_size ) )
+		return true;
+	// TODO: Write out all dirty blocks and ask the next level to sync as well.
+	return true;
+}
+
+// Zero-fill the file region between the initialized prefix (file_written)
+// and to_where, one block at a time. Freshly acquired cache pages contain
+// poison bytes, so any range exposed to a reader or skipped over by a writer
+// must be cleared first. Caller holds fcache_mutex and has already grown the
+// block array to cover to_where.
+void FileCache::InitializeFileData(off_t to_where)
+{
+	while ( file_written < to_where )
+	{
+		off_t left = to_where - file_written;
+		size_t block_off = (size_t) (file_written % Page::Size());
+		size_t block_num = (size_t) (file_written / Page::Size());
+		size_t block_left = Page::Size() - block_off;
+		// Zero up to the end of the current block or the target, whichever
+		// comes first.
+		size_t amount_to_set = (uintmax_t) left < (uintmax_t) block_left ?
+		                       (size_t) left : block_left;
+		assert(block_num < blocks_used);
+		BlockCacheBlock* block = blocks[block_num];
+		uint8_t* block_data = kernel_block_cache->BlockData(block);
+		uint8_t* data = block_data + block_off;
+		memset(data, 0, amount_to_set);
+		file_written += (off_t) amount_to_set;
+		kernel_block_cache->MarkModified(block);
+	}
+}
+
+// Read up to count bytes at offset off into buf through the io context,
+// block by block. Returns the number of bytes read, 0 at or past EOF, or -1
+// with errno set. A failed user-space copy midway yields a short read.
+ssize_t FileCache::pread(ioctx_t* ctx, uint8_t* buf, size_t count, off_t off)
+{
+	ScopedLock lock(&fcache_mutex);
+	if ( off < 0 )
+		return errno = EINVAL, -1;
+	if ( file_size <= off )
+		return 0;
+	// Clamp the request to the end of the file and to what fits in ssize_t.
+	off_t available_bytes = file_size - off;
+	if ( (uintmax_t) available_bytes < (uintmax_t) count )
+		count = available_bytes;
+	if ( (size_t) SSIZE_MAX < count )
+		count = (size_t) SSIZE_MAX;
+	size_t sofar = 0;
+	while ( sofar < count )
+	{
+		off_t current_off = off + (off_t) sofar;
+		size_t left = count - sofar;
+		size_t block_off = (size_t) (current_off % Page::Size());
+		size_t block_num = (size_t) (current_off / Page::Size());
+		size_t block_left = Page::Size() - block_off;
+		size_t amount_to_copy = left < block_left ? left : block_left;
+		assert(block_num < blocks_used);
+		BlockCacheBlock* block = blocks[block_num];
+		const uint8_t* block_data = kernel_block_cache->BlockData(block);
+		const uint8_t* src_data = block_data + block_off;
+		uint8_t* dest_buf = buf + sofar;
+		// Lazily zero any never-written range before exposing it; cache
+		// pages start out as poison bytes.
+		off_t end_at = current_off + (off_t) amount_to_copy;
+		if ( file_written < end_at )
+			InitializeFileData(end_at);
+		if ( !ctx->copy_to_dest(dest_buf, src_data, amount_to_copy) )
+			return sofar ? (ssize_t) sofar : -1;
+		sofar += amount_to_copy;
+		kernel_block_cache->MarkUsed(block);
+	}
+	return (ssize_t) sofar;
+}
+
+// Write up to count bytes from buf at offset off through the io context,
+// growing the file as needed. Returns bytes written, or -1 with errno set.
+// A failed allocation or user-space copy midway yields a short write.
+ssize_t FileCache::pwrite(ioctx_t* ctx, const uint8_t* buf, size_t count, off_t off)
+{
+	ScopedLock lock(&fcache_mutex);
+	if ( off < 0 )
+		return errno = EINVAL, -1;
+	// Clamp so off + count cannot exceed OFF_MAX (and cannot overflow).
+	off_t available_growth = OFF_MAX - off;
+	if ( (uintmax_t) available_growth < (uintmax_t) count )
+		count = (size_t) available_growth;
+	// TODO: Rather than doing an EOF - shouldn't errno be set to something like
+	// "Hey, the filesize limit has been reached"?
+	if ( (size_t) SSIZE_MAX < count )
+		count = (size_t) SSIZE_MAX;
+	// Grow the file to cover the write; on failure fall back to writing only
+	// what fits in the current size.
+	off_t write_end = off + (off_t) count;
+	if ( file_size < write_end && !ChangeSize(write_end, false) )
+	{
+		if ( file_size < off )
+			return -1;
+		count = (size_t) (file_size - off);
+		write_end = off + (off_t) count;
+	}
+	assert(write_end <= file_size);
+	size_t sofar = 0;
+	while ( sofar < count )
+	{
+		off_t current_off = off + (off_t) sofar;
+		size_t left = count - sofar;
+		size_t block_off = (size_t) (current_off % Page::Size());
+		size_t block_num = (size_t) (current_off / Page::Size());
+		size_t block_left = Page::Size() - block_off;
+		size_t amount_to_copy = left < block_left ? left : block_left;
+		assert(block_num < blocks_used);
+		BlockCacheBlock* block = blocks[block_num];
+		uint8_t* block_data = kernel_block_cache->BlockData(block);
+		uint8_t* data = block_data + block_off;
+		const uint8_t* src_buf = buf + sofar;
+		off_t begin_at = off + (off_t) sofar;
+		off_t end_at = current_off + (off_t) amount_to_copy;
+		// Zero the gap between the initialized prefix and this write so a
+		// sparse region never exposes poison bytes.
+		if ( file_written < begin_at )
+			InitializeFileData(begin_at);
+		modified = true; /* Unconditionally - copy_from_src can fail midway. */
+		if ( !ctx->copy_from_src(data, src_buf, amount_to_copy) )
+			return sofar ? (ssize_t) sofar : -1;
+		if ( file_written < end_at )
+			file_written = end_at;
+		sofar += amount_to_copy;
+		kernel_block_cache->MarkModified(block);
+	}
+	return (ssize_t) sofar;
+}
+
+// Set the file size to exactly length, releasing or acquiring blocks as
+// needed. Shrinking also clamps file_written, so a later regrowth re-zeroes
+// the region lazily. Returns 0 on success, -1 with errno on failure.
+int FileCache::truncate(ioctx_t* /*ctx*/, off_t length)
+{
+	ScopedLock lock(&fcache_mutex);
+	return ChangeSize(length, true) ? 0 : -1;
+}
+
+// Return the current cached file size under the cache lock.
+off_t FileCache::GetFileSize()
+{
+	ScopedLock lock(&fcache_mutex);
+	return file_size;
+}
+
+// Resolve a seek request against the cached file size. Only SEEK_SET and
+// SEEK_END are handled here; SEEK_CUR yields EINVAL -- presumably the
+// current offset is tracked and resolved by the descriptor layer (verify
+// against callers).
+off_t FileCache::lseek(ioctx_t* /*ctx*/, off_t offset, int whence)
+{
+	ScopedLock lock(&fcache_mutex);
+	if ( whence == SEEK_SET )
+		return offset;
+	if ( whence == SEEK_END )
+		return (off_t) file_size + offset;
+	return errno = EINVAL, -1;
+}
+
+//bool FileCache::ChangeBackend(FileCacheBackend* backend, bool sync_old)
+//{
+//}
+
+// Change the cached file size to new_size, acquiring or releasing blocks as
+// needed; if exact is set the block pointer array is shrunk to fit. Returns
+// false with errno set if the size is unrepresentable or allocation failed.
+// Caller holds fcache_mutex.
+bool FileCache::ChangeSize(off_t new_size, bool exact)
+{
+	if ( file_size == new_size && !exact )
+		return true;
+
+	// Compute ceil(new_size / page size) without forming
+	// new_size + Page::Size() - 1, which invokes signed overflow (undefined
+	// behavior) when new_size is near OFF_MAX -- reachable via truncate or
+	// a pwrite whose end is clamped to OFF_MAX.
+	off_t page_size = (off_t) Page::Size();
+	off_t numblocks_off_t = new_size / page_size +
+	                        (new_size % page_size ? 1 : 0);
+	// TODO: An alternative datastructure (perhaps tree like) will decrease the
+	// memory consumption of this pointer list as well as avoid this filesize
+	// restriction on 32-bit platforms.
+	if ( (uintmax_t) SIZE_MAX < (uintmax_t) numblocks_off_t )
+		return errno = EFBIG, false;
+	size_t numblocks = (size_t) numblocks_off_t;
+	if ( !ChangeNumBlocks(numblocks, exact) )
+		return false;
+	// Shrinking invalidates part of the initialized prefix.
+	if ( new_size < file_written )
+		file_written = new_size;
+	file_size = new_size;
+	modified_size = true;
+	return true;
+}
+
+// Resize the array of cache block pointers to hold new_numblocks blocks,
+// acquiring or releasing blocks from the global cache as needed. If exact is
+// unset the array grows geometrically to amortize reallocations. Returns
+// false on allocation failure with the cache left in its previous state.
+// Caller holds fcache_mutex.
+bool FileCache::ChangeNumBlocks(size_t new_numblocks, bool exact)
+{
+	if ( new_numblocks == blocks_used && !exact )
+		return true;
+
+	// If needed, adapt the length of the array containing block pointers.
+	// A shrink is always performed exactly.
+	if ( new_numblocks < blocks_length )
+		exact = true;
+	size_t new_blocks_length = 2 * blocks_length;
+	if ( exact || new_blocks_length < new_numblocks )
+		new_blocks_length = new_numblocks;
+	size_t size = sizeof(BlockCacheBlock*) * new_blocks_length;
+	BlockCacheBlock** new_blocks = (BlockCacheBlock**) realloc(blocks, size);
+	if ( !new_blocks )
+	{
+		// A failed shrink is harmless: keep using the larger old array.
+		if ( new_blocks_length < blocks_length )
+			new_blocks = blocks;
+		else
+			return false;
+	}
+	else
+		// Note: comma operator -- both assignments belong to this else.
+		blocks = new_blocks,
+		blocks_length = new_blocks_length;
+
+	assert(!blocks_length || blocks);
+
+	// Release blocks if the file has decreased in size.
+	if ( new_numblocks < blocks_used )
+	{
+		for ( size_t i = new_numblocks; i < blocks_used; i++ )
+			kernel_block_cache->ReleaseBlock(blocks[i]);
+		blocks_used = new_numblocks;
+		return true;
+	}
+
+	// Acquire more blocks if the file has increased in size.
+	for ( size_t i = blocks_used; i < new_numblocks; i++ )
+	{
+		if ( !(blocks[i] = kernel_block_cache->AcquireBlock()) )
+		{
+			// Roll back the blocks acquired so far in this call.
+			for ( size_t n = blocks_used; n < i; n++ )
+				kernel_block_cache->ReleaseBlock(blocks[n]);
+			return false;
+		}
+	}
+
+	blocks_used = new_numblocks;
+	return true;
+}
+
+} // namespace Sortix
diff --git a/sortix/fs/kram.cpp b/sortix/fs/kram.cpp
index 0b1780ce..f4908f19 100644
--- a/sortix/fs/kram.cpp
+++ b/sortix/fs/kram.cpp
@@ -49,7 +49,6 @@ File::File(dev_t dev, ino_t ino, uid_t owner, gid_t group, mode_t mode)
dev = (dev_t) this;
if ( !ino )
ino = (ino_t) this;
- filelock = KTHREAD_MUTEX_INITIALIZER;
this->type = S_IFREG;
this->stat_uid = owner;
this->stat_gid = group;
@@ -58,82 +57,42 @@ File::File(dev_t dev, ino_t ino, uid_t owner, gid_t group, mode_t mode)
this->stat_blksize = 1;
this->dev = dev;
this->ino = ino;
- size = 0;
- bufsize = 0;
- buf = NULL;
}
File::~File()
{
- delete[] buf;
}
int File::truncate(ioctx_t* ctx, off_t length)
{
- ScopedLock lock(&filelock);
- return truncate_unlocked(ctx, length);
-}
-
-int File::truncate_unlocked(ioctx_t* /*ctx*/, off_t length)
-{
- if ( SIZE_MAX < (uintmax_t) length ) { errno = EFBIG; return -1; }
- if ( (uintmax_t) length < size )
- memset(buf + length, 0, size - length);
- if ( bufsize < (size_t) length )
+ int ret = fcache.truncate(ctx, length);
+ if ( ret == 0 )
{
- // TODO: Don't go above OFF_MAX (or what it is called)!
- size_t newbufsize = bufsize ? 2UL * bufsize : 128UL;
- if ( newbufsize < (size_t) length )
- newbufsize = (size_t) length;
- uint8_t* newbuf = new uint8_t[newbufsize];
- if ( !newbuf )
- return -1;
- memcpy(newbuf, buf, size);
- delete[] buf; buf = newbuf; bufsize = newbufsize;
+ ScopedLock lock(&metalock);
+ stat_size = fcache.GetFileSize();
}
- kthread_mutex_lock(&metalock);
- size = stat_size = length;
- kthread_mutex_unlock(&metalock);
- return 0;
+ return ret;
}
-off_t File::lseek(ioctx_t* /*ctx*/, off_t offset, int whence)
+off_t File::lseek(ioctx_t* ctx, off_t offset, int whence)
{
- ScopedLock lock(&filelock);
- if ( whence == SEEK_SET )
- return offset;
- if ( whence == SEEK_END )
- return (off_t) size + offset;
- errno = EINVAL;
- return -1;
+ return fcache.lseek(ctx, offset, whence);
}
ssize_t File::pread(ioctx_t* ctx, uint8_t* dest, size_t count, off_t off)
{
- ScopedLock lock(&filelock);
- if ( size < (uintmax_t) off )
- return 0;
- size_t available = size - off;
- if ( available < count )
- count = available;
- if ( !ctx->copy_to_dest(dest, buf + off, count) )
- return -1;
- return count;
+ return fcache.pread(ctx, dest, count, off);
}
ssize_t File::pwrite(ioctx_t* ctx, const uint8_t* src, size_t count, off_t off)
{
- ScopedLock lock(&filelock);
- // TODO: Avoid having off + count overflow!
- if ( size < off + count )
- truncate_unlocked(ctx, off+count);
- if ( size <= (uintmax_t) off )
- return -1;
- size_t available = size - off;
- if ( available < count )
- count = available;
- ctx->copy_from_src(buf + off, src, count);
- return count;
+ ssize_t ret = fcache.pwrite(ctx, src, count, off);
+ if ( 0 < ret )
+ {
+ ScopedLock lock(&metalock);
+ stat_size = fcache.GetFileSize();
+ }
+ return ret;
}
Dir::Dir(dev_t dev, ino_t ino, uid_t owner, gid_t group, mode_t mode)
diff --git a/sortix/fs/kram.h b/sortix/fs/kram.h
index 3dc8360b..be23f4e4 100644
--- a/sortix/fs/kram.h
+++ b/sortix/fs/kram.h
@@ -27,6 +27,7 @@
#include
#include
+#include <sortix/kernel/fcache.h>
namespace Sortix {
namespace KRAMFS {
@@ -50,17 +51,7 @@ public:
off_t off);
private:
- virtual int truncate_unlocked(ioctx_t* ctx, off_t length);
-
-private:
- kthread_mutex_t filelock;
- uid_t owner;
- uid_t group;
- mode_t mode;
- size_t size;
- size_t bufsize;
- uint8_t* buf;
- size_t numlinks;
+ FileCache fcache;
};
diff --git a/sortix/include/sortix/kernel/fcache.h b/sortix/include/sortix/kernel/fcache.h
new file mode 100644
index 00000000..cc7323d9
--- /dev/null
+++ b/sortix/include/sortix/kernel/fcache.h
@@ -0,0 +1,143 @@
+/*******************************************************************************
+
+ Copyright(C) Jonas 'Sortie' Termansen 2013.
+
+ This file is part of Sortix.
+
+ Sortix is free software: you can redistribute it and/or modify it under the
+ terms of the GNU General Public License as published by the Free Software
+ Foundation, either version 3 of the License, or (at your option) any later
+ version.
+
+ Sortix is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ details.
+
+ You should have received a copy of the GNU General Public License along with
+ Sortix. If not, see <http://www.gnu.org/licenses/>.
+
+ sortix/kernel/fcache.h
+ Cache mechanism for file contents.
+
+*******************************************************************************/
+
+#ifndef INCLUDE_SORTIX_KERNEL_FCACHE_H
+#define INCLUDE_SORTIX_KERNEL_FCACHE_H
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <sortix/kernel/kthread.h>
+
+namespace Sortix {
+
+struct ioctx_struct;
+typedef struct ioctx_struct ioctx_t;
+
+class BlockCache;
+struct BlockCacheArea;
+struct BlockCacheBlock;
+class FileCache;
+//class FileCacheBackend;
+
+// Flag bits stored in the low bits of BlockCacheBlock::information.
+const uintptr_t BCACHE_PRESENT = 1 << 0; // Block has backing memory mapped.
+const uintptr_t BCACHE_USED = 1 << 1; // Block is currently handed out.
+const uintptr_t BCACHE_MODIFIED = 1 << 2; // Block has unwritten changes.
+
+// Global pool of page-sized cache blocks backing file contents. Blocks are
+// handed out to FileCache instances and tracked in an MRU/LRU chain --
+// presumably groundwork for future eviction (none is implemented yet).
+class BlockCache
+{
+public:
+	BlockCache();
+	~BlockCache();
+	BlockCacheBlock* AcquireBlock(); // Get an unused block, NULL on OOM.
+	void ReleaseBlock(BlockCacheBlock* block); // Return a block to the pool.
+	void MarkUsed(BlockCacheBlock* block); // Move block to MRU position.
+	void MarkModified(BlockCacheBlock* block); // Move to MRU and set dirty.
+	uint8_t* BlockData(BlockCacheBlock* block); // Page-sized data pointer.
+
+public:
+	// Internals -- public here, but only used by BlockCache itself;
+	// candidates for private (TODO confirm no external callers).
+	bool AddArea();
+	void UnlinkBlock(BlockCacheBlock* block);
+	void LinkBlock(BlockCacheBlock* block);
+
+private:
+	BlockCacheArea* areas; // Array of mapped memory areas.
+	size_t areas_used; // Areas currently mapped.
+	size_t areas_length; // Capacity of the areas array.
+	size_t blocks_per_area; // Blocks provided by each area.
+	BlockCacheBlock* mru_block; // Head of the in-use chain.
+	BlockCacheBlock* lru_block; // Tail of the in-use chain.
+	BlockCacheBlock* unused_block; // Head of the free list.
+	kthread_mutex_t bcache_mutex; // Protects all of the above.
+
+};
+
+// One contiguous mapped region of cache memory together with the
+// descriptors for the page-sized blocks it contains.
+struct BlockCacheArea
+{
+	uint8_t* data; // Start of the mapped block data.
+	BlockCacheBlock* blocks; // One descriptor per block in this area.
+	struct addralloc_t addralloc; // Kernel address space reservation.
+
+};
+
+// Descriptor for one page-sized cache block. `information' packs the global
+// block id (bits 12 and up) with the BCACHE_* flag bits in the low bits.
+// The next/prev pointers link either the MRU/LRU chain or the unused list.
+struct BlockCacheBlock
+{
+	uintptr_t information; // Block id << 12 | BCACHE_* flags.
+	FileCache* fcache; // Owning file cache, or NULL when unused.
+	BlockCacheBlock* next_block;
+	BlockCacheBlock* prev_block;
+	uintptr_t BlockId() { return information >> 12; }
+};
+
+// In-memory cache of one file's contents, built from blocks borrowed from
+// the global BlockCache. Currently has no backend, so it doubles as the
+// authoritative storage (used by the kernel RAM filesystem).
+class FileCache
+{
+public:
+	// Bring up the global block cache; must run once during boot before any
+	// FileCache is constructed.
+	static void Init();
+
+public:
+	FileCache(/*FileCacheBackend* backend = NULL*/);
+	~FileCache();
+	int sync(ioctx_t* ctx); // Flush dirty state; currently a no-op.
+	ssize_t pread(ioctx_t* ctx, uint8_t* buf, size_t count, off_t off);
+	ssize_t pwrite(ioctx_t* ctx, const uint8_t* buf, size_t count, off_t off);
+	int truncate(ioctx_t* ctx, off_t length);
+	off_t lseek(ioctx_t* ctx, off_t offset, int whence);
+	//bool ChangeBackend(FileCacheBackend* backend, bool sync_old);
+	off_t GetFileSize();
+
+private:
+	bool ChangeSize(off_t newsize, bool exact); // Resize, adjust blocks.
+	bool ChangeNumBlocks(size_t numblocks, bool exact); // Resize block array.
+	bool Synchronize(); // Write-back; no-op until a backend exists.
+	void InitializeFileData(off_t to_where); // Lazily zero unwritten range.
+
+private:
+	off_t file_size; // Current logical file size.
+	off_t file_written; // Prefix of the file that has been initialized.
+	BlockCacheBlock** blocks; // One cache block per file page.
+	size_t blocks_used; // Blocks currently held.
+	size_t blocks_length; // Capacity of the blocks array.
+	kthread_mutex_t fcache_mutex; // Protects all of the above.
+	bool modified; // Data changed since last sync.
+	bool modified_size; // Size changed since last sync.
+	//FileCacheBackend* backend;
+
+};
+
+/*
+class FileCacheBackend
+{
+public:
+ virtual ~FileCacheBackend() { }
+ virtual int fcache_sync(ioctx_t* ctx) = 0;
+ virtual ssize_t fcache_pread(ioctx_t* ctx, uint8_t* buf, size_t count, off_t off) = 0;
+ virtual ssize_t fcache_pwrite(ioctx_t* ctx, const uint8_t* buf, size_t count, off_t off) = 0;
+ virtual int fcache_truncate(ioctx_t* ctx, off_t length) = 0;
+ virtual off_t fcache_lseek(ioctx_t* ctx, off_t offset, int whence) = 0;
+
+};
+*/
+
+} // namespace Sortix
+
+#endif
diff --git a/sortix/kernel.cpp b/sortix/kernel.cpp
index ea5dd659..3e55d1d1 100644
--- a/sortix/kernel.cpp
+++ b/sortix/kernel.cpp
@@ -45,6 +45,7 @@
#include
#include
#include
+#include <sortix/kernel/fcache.h>
#include
#include
@@ -330,6 +331,9 @@ static void BootThread(void* /*user*/)
// Stage 4. Initialize the Filesystem
//
+ // Bring up the filesystem cache.
+ FileCache::Init();
+
 Ref<DescriptorTable> dtable(new DescriptorTable());
if ( !dtable )
Panic("Unable to allocate descriptor table");