// core.c
i = EFFS_NOTADIR;
tw(tr(TR_END, TrDirHigh, "} %d\n", i));
return i;
}
// Return the name and iref of the next entry in directory <dir>. <i> is the
// last entry returned from this function. On the first call after
// dir_open(), <i> equals <dir>.
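//
// A typical caller iterates like this (illustrative sketch only, not part
// of this module; the buffer size is an assumption):
//
//     char name[FFS_FILENAME_MAX + 1];
//     iref_t i = dir;                  /* first call passes <dir> itself */
//     while ((i = dir_next(dir, i, name, sizeof(name))) > 0) {
//         /* <name> holds the entry's name, <i> its iref */
//     }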
iref_t dir_next(iref_t dir, iref_t i, char *name, int8 size)
{
struct inode_s *ip = inode_addr(i);
char *p;
tw(tr(TR_BEGIN, TrDirHigh, "dir_next(%d, %d, ?, %d) {\n", dir, i, size));
i = (i == dir ? ip->child : ip->sibling);
while (i != (iref_t) IREF_NULL) {
ip = inode_addr(i);
if (is_object_valid(ip)) {
p = offset2addr(location2offset(ip->location));
while (size-- && (*name++ = *p++))
;
break;
}
i = ip->sibling;
}
if (i == (iref_t) IREF_NULL)
i = 0;
tw(tr(TR_END, TrDirHigh, "} %d\n", i));
return i;
}
// Traverse a directory given by inode reference <i>. If <i> is negative, it
// refers to the directory itself, so we start by traversing the child link.
// Otherwise, if <i> is positive, it refers to an entry within the directory
// and we only traverse sibling links. Returns the iref of the last object in
// the directory (or the negative iref of the directory if the child link is
// empty). <entries> is the number of non-deleted objects in the dir (only
// valid if traversed from the start, e.g. with a negative <i>).
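//
// For example, counting the live entries of a directory with iref <dir>
// (illustrative sketch only, not part of this module):
//
//     iref_t entries;
//     iref_t last = dir_traverse(-dir, &entries);
//     /* <entries> holds the number of valid objects; <last> is the iref
//        of the last object, or -dir if the directory is empty */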
iref_t dir_traverse(iref_t i, iref_t *entries)
{
iref_t j = 0, valid = 0, erased = 0, invalid = 0;
struct inode_s *ip;
tw(tr(TR_FUNC, TrDirLow, "dir_traverse(%d, ?) { ", i));
if (i < 0) {
// If the directory's child is empty, this is a virgin directory and we
// return the negative iref of the directory itself.
j = i;
i = -i;
ip = inode_addr(i);
i = ip->child;
}
if (i != (iref_t) IREF_NULL) {
do {
if (j == i) {
tw(tr(TR_NULL, TrDirLow, "LOOP! "));
return EFFS_SIBLINGLOOP;
}
j = i;
ip = inode_addr(j);
tw(tr(TR_NULL, TrDirLow, "%d/%x ", j, ip->flags));
if (is_object_valid(ip))
valid++;
else if (is_object(ip, OT_ERASED))
erased++;
else
invalid++;
} while ((i = ip->sibling) != (iref_t) IREF_NULL);
}
if (entries != 0)
*entries = valid;
tw(tr(TR_NULL, TrDirLow, "} (valid = %d, erased = %d, invalid = %d) %d\n",
valid, erased, invalid, j));
return j;
}
/******************************************************************************
* Block, Inode and Data Allocation
******************************************************************************/
// Find the youngest free block. Return the block index on success, or -1 if
// no block could be allocated. If the argument <priority> is zero, this is a
// normal alloc and it will leave at least fs.blocks_free_min spare blocks.
// Otherwise, if it is non-zero, it is a privileged alloc (initiated by a
// reclaim operation) and it will not necessarily leave any spare blocks.
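// For example (illustrative, assuming fs.blocks_free_min == 1): with only
// one free block left, a normal alloc (priority == 0) fails because
// blocks_free <= fs.blocks_free_min, whereas a reclaim-initiated alloc
// (priority == 1) may still take the last free block.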
bref_t block_alloc(bref_t priority, uint16 flags)
{
bref_t i, b, b_min, b_max, blocks_free;
struct block_header_s *bhp;
age_t age, age_min, age_max;
tw(tr(TR_BEGIN, TrBlock, "block_alloc(%d, 0x%x) {\n", priority, flags));
ttw(ttr(TTrData, "ba(%d,0x%x) {" NL, priority, flags));
age_min = BLOCK_AGE_MAX;
age_max = 0;
blocks_free = 0;
b_min = b_max = -1;
tw(tr(TR_FUNC, TrBlock, "blocks(age): "));
for (i = dev.numblocks - 1; i >= 0; i--)
{
if (is_block(i, BF_IS_FREE))
{
blocks_free++;
bhp = (struct block_header_s *) offset2addr(dev.binfo[i].offset);
age = bhp->age;
tw(tr(TR_NULL, TrBlock, "%d(%d) ", i, age));
// Remember index of block found. We use '<=' and '>=' operators
// (instead of '<' and '>') to ensure we have both limits
// properly set on exit from this loop.
if (age <= age_min) {
b_min = i;
age_min = age;
}
if (age >= age_max) {
b_max = i;
age_max = age;
}
}
}
tw(tr(TR_NULL, TrBlock, "\n"));
// Handle age wrap around
b = b_min;
if (b_min != -1) {
// Either age_max really is the maximum age, so b_min is the youngest
// block, OR the ages have wrapped and age_max is logically the minimum
// age, so b_max is the youngest block.
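// For example (illustrative): with free block ages {3, 7} the spread is
// below 0x8000, so the block aged 3 (b_min) is picked; with ages
// {0x0005, 0xFFF0} the spread is >= 0x8000, i.e. the small age has wrapped
// past BLOCK_AGE_MAX, so the block aged 0xFFF0 (b_max) is picked instead.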
b = (age_max - age_min) < 0x8000 ? b_min : b_max;
}
// Only privileged allocs will get the last free block
if (blocks_free <= fs.blocks_free_min - priority) {
b = -1;
tw(tr(TR_FUNC, TrBlock, "Only %d block(s) left, required = %d\n",
blocks_free, fs.blocks_free_min - priority));
}
else {
// Prepare/format the block for holding data/inodes
if (flags == BF_DATA) {
bstat[b].used = BHEADER_SIZE;
bstat[b].lost = 0;
bstat[b].objects = 0;
block_flags_write(b, BF_DATA);
}
else if (flags == BF_COPYING) {
// This code is used on a fresh format and when allocating a new
// block for reclaiming inodes
block_flags_write(b, BF_COPYING);
bstat[b].used = 0;
bstat[b].lost = 0;
bstat[b].objects = 1; // first inode to be allocated
}
else {
tw(tr(TR_FUNC, TrBlock, "FATAL: Bad input (flags = 0x%X)\n", flags));
}
}
tw(tr(TR_END, TrBlock, "} (%d) %d\n", blocks_free, b));
ttw(ttr(TTrData, "} 0x%x" NL, b));
return b;
}
// Free and schedule a block for erase.
void block_free(bref_t b)
{
tw(tr(TR_BEGIN, TrBlock, "block_free(%d) {\n", b));
// mark block as invalid and schedule erasure
block_flags_write(b, BF_LOST);
block_reclaim(b);
tw(tr(TR_END, TrBlock, "}\n"));
}
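// Update the RAM copy of the block's flags (bstat[]) with <flags> and write
// the resulting flags halfword to the block header in flash.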
void block_flags_write(uint8 block, uint8 flags)
{
struct block_header_s *bhp =
(struct block_header_s *) offset2addr(dev.binfo[block].offset);
tw(tr(TR_BEGIN, TrBlock, "block_flags_write(%d, 0x%x)\n", block, flags));
bstat[block].flags = BIT_SET(bstat[block].flags, flags);
ffsdrv.write_halfword((uint16 *) &bhp->flags, bstat[block].flags );
tw(tr(TR_END, TrBlock, ""));
}
// Allocate an inode for a new object. We use bstat[fs.inodes].objects as the
// starting point for our scan for a free inode instead of starting from the
// beginning each time.
iref_t inode_alloc(void)
{
iref_t i;
tw(tr(TR_BEGIN, TrInode, "inode_alloc() {\n"));
ttw(ttr(TTrInode, "i_a() {" NL));
if ((i = inode_alloc_try()) == 0) {
// FIXME: We are not necessarily out of inodes; there may simply be too many
// objects. Reclaiming the inodes will not help in that case!
tw(tr(TR_FUNC, TrInode, "NOTE: Out of free inodes...\n"));
inodes_reclaim();
i = inode_alloc_try();
}
tw(tr(TR_END, TrInode, "} %d\n", i));
ttw(ttr(TTrInode, "} %d" NL, i));
return i;
}
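// Scan for a free inode, starting from bstat[fs.inodes].objects. Return the
// iref of the inode found, or zero if no inode is available (or the maximum
// number of objects has been reached).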
iref_t inode_alloc_try(void)
{
iref_t i = fs.inodes_max;
struct inode_s *ip;
// If we have not yet reached the maximum allowed number of objects,
// search for next free inode...
if (bstat[fs.inodes].used - bstat[fs.inodes].lost < fs.objects_max)
{
ip = inode_addr(bstat[fs.inodes].objects);
for (i = bstat[fs.inodes].objects;
i < fs.inodes_max - FFS_INODES_MARGIN; i++, ip++) {
if (ip->location == FLASH_NULL32) {
bstat[fs.inodes].objects = i;
bstat[fs.inodes].used++;
break;
}
}
}
if (i >= fs.inodes_max - FFS_INODES_MARGIN)
i = 0;
tw(tr(TR_FUNC, TrInode, "inode_alloc_try() %d\n", i));
ttw(ttr(TTrInode, "i_a_t() %d" NL, i));
return i;
}
// NOTEME: Should file data be word aligned to enable faster reads and
// writes in word quantities AND to be more compatible with the inherent
// 16-bit access width of flash memories?
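// Allocate <size> bytes of data space. Return the offset of the space, or
// zero if the allocation failed. On success, bstat[] and the allocation
// statistics are updated.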
offset_t data_alloc(int size)
{
offset_t offset = 0;
bref_t b;
tw(tr(TR_BEGIN, TrData, "data_alloc(%d) {\n", size));
ttw(ttr(TTrData, "da(%d) {" NL, size));
offset = data_prealloc(size);
// If we did allocate the space, we update bstat[]
if (offset > 0) {
b = offset2block(offset);
bstat[b].used += size;
stats.data_allocated += size; // STATS
}
tw(tr(TR_END, TrData, "} 0x%04x\n", offset));
ttw(ttr(TTrData, "} %x" NL, offset));
return offset;
}
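// Find <realsize> bytes of free data space, reclaiming data blocks if
// necessary. Return the offset of the space, or zero if it cannot be made
// available.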
offset_t data_prealloc(int realsize)
{
int result, i, bytes_free;
offset_t offset;
// Is it possible to get this amount of free space and still have enough
// reserved space?
ffs_query(Q_BYTES_FREE_RAW, &bytes_free);
if (realsize > (bytes_free + FFS_FILENAME_MAX + dev.atomsize))
return 0; // Not enough unused space
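// Alternate between trying to allocate the space and reclaiming data; give
// up after at most dev.numblocks rounds.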
for (i = 0; i < dev.numblocks; i++) {
if ((offset = data_alloc_try(realsize)) > 0)
return offset; // Space found
if ((result = data_reclaim(realsize)) < 0)
return 0; // Data reclaim failed!
}
return 0; // No space found
}
// Find free data space of size <size>. Return zero if no space is available.
// Note that we ensure that we always have space immediately available for a
// privileged data_alloc(), i.e. a data_alloc() that allocates data space
// without performing a data_reclaim(). This is important when
// re-creating/re-locating the journal file.
offset_t data_alloc_try(int size)
{
bref_t b, blocks_free;
int free;
offset_t offset_big = 0, offset_small = 0;
int size_big_ok = 0, size_small_ok = 0;
int size_big, size_small;
int reserved;
tw(tr(TR_FUNC, TrData, "data_alloc_try(%d) { ", size));
ttw(ttr(TTrData, "dat(%d) {" NL, size));
// NOTE: When we allocate, we only need to have reserved space for X journal
// files, where X is the maximum number of journals used per data reclaim.
// The only exception is when an object_relocate() has failed, in which case
// reserved_space is set to zero.
reserved = RESERVED_LOW;
if (fs.reserved_space < reserved)
reserved = fs.reserved_space;
// Set size_big to the greater of the two sizes and size_small to the lesser.
size_big = (size > reserved ? size : reserved);
size_small = (size > reserved ? reserved : size);
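// For example (illustrative): with size == 400 and reserved == 128, size_big
// is 400 and size_small is 128; the search below tries to find room for
// both, so that the reserved (journal) space remains available after this
// allocation.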
tw(tr(TR_NULL, TrData, "(size_big, small = %d, %d) ", size_big, size_small));
// First search for free space in data blocks
tw(tr(TR_NULL, TrData, "block:free,objects: "));
blocks_free = -fs.blocks_free_min;
for (b = 0; b < dev.numblocks; b++) {
if (is_block(b, BF_IS_FREE)) {
blocks_free++;
}
else if (is_block(b, BF_IS_DATA)) {
free = dev.blocksize - bstat[b].used;
tw(tr(TR_NULL, TrData, "%d:%d,%d ", b, free, bstat[b].objects));
if (bstat[b].objects < fs.block_files_max - fs.block_files_reserved) {
if (!size_big_ok && !size_small_ok &&
(free >= size_big + size_small)) {
size_big_ok = size_small_ok = 1;
offset_big = offset_small =
dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "big/small_ok "));
break;
}
else if (!size_big_ok && free >= size_big) {
size_big_ok = 1;
offset_big = dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "big_ok "));
}
else if (!size_small_ok && free >= size_small) {
size_small_ok = 1;
offset_small = dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "small_ok "));
}
}
}
if (size_small_ok && size_big_ok)
break;
}
// If we have any non-spare free blocks available, we also have the big
// space covered!
if (!size_big_ok && blocks_free * dev.blocksize > (unsigned)size_big) {
size_big_ok = 1;
b = block_alloc(0, BF_DATA);
if (b >= 0)
offset_big = dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "big_ok(free blocks) "));
// Is there also space for the small one?
if (!size_small_ok && b >= 0 &&
dev.blocksize > ((unsigned)size_big + (unsigned)size_small)) {
size_small_ok = 1;
offset_small = dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "small_ok(free blocks) "));
}
}
if (!size_small_ok && blocks_free >= 1) {
b = block_alloc(0, BF_DATA);
if (b >= 0) {
size_small_ok = 1;
offset_small = dev.binfo[b].offset + bstat[b].used;
tw(tr(TR_NULL, TrData, "small_ok(free blocks) "));
}
}
if (size_big_ok && size_small_ok)
offset_big = (size > reserved ? offset_big : offset_small);
else
offset_big = 0;
tw(tr(TR_NULL, TrData, "} 0x%x\n", offset_big));
ttw(ttr(TTrData, "} %x " NL, offset_big));
return offset_big;
}
offset_t data_reserved_alloc(int size)
{
bref_t b;