Merge branch 'extk' into 'master'
ext4l: Infrastructure and fixes for write support (part K) See merge request u-boot/u-boot!355
This commit is contained in:
@@ -101,12 +101,12 @@ struct timespec64 {
|
||||
|
||||
/* cmpxchg - compare and exchange, single-threaded version */
|
||||
#define cmpxchg(ptr, old, new) ({ \
|
||||
typeof(*(ptr)) __old = (old); \
|
||||
typeof(*(ptr)) __new = (new); \
|
||||
typeof(*(ptr)) __ret = *(ptr); \
|
||||
if (__ret == __old) \
|
||||
*(ptr) = __new; \
|
||||
__ret; \
|
||||
typeof(*(ptr)) __cmpxchg_old = (old); \
|
||||
typeof(*(ptr)) __cmpxchg_new = (new); \
|
||||
typeof(*(ptr)) __cmpxchg_ret = *(ptr); \
|
||||
if (__cmpxchg_ret == __cmpxchg_old) \
|
||||
*(ptr) = __cmpxchg_new; \
|
||||
__cmpxchg_ret; \
|
||||
})
|
||||
|
||||
/* Reference count type */
|
||||
@@ -328,8 +328,8 @@ extern struct user_namespace init_user_ns;
|
||||
/* Buffer operations - stubs */
|
||||
#define wait_on_buffer(bh) do { } while (0)
|
||||
#define __bforget(bh) do { } while (0)
|
||||
#define mark_buffer_dirty_inode(bh, i) do { } while (0)
|
||||
#define mark_buffer_dirty(bh) do { } while (0)
|
||||
#define mark_buffer_dirty_inode(bh, i) sync_dirty_buffer(bh)
|
||||
#define mark_buffer_dirty(bh) sync_dirty_buffer(bh)
|
||||
#define lock_buffer(bh) set_buffer_locked(bh)
|
||||
#define unlock_buffer(bh) clear_buffer_locked(bh)
|
||||
struct buffer_head *sb_getblk(struct super_block *sb, sector_t block);
|
||||
@@ -349,13 +349,15 @@ struct buffer_head *sb_getblk(struct super_block *sb, sector_t block);
|
||||
* We implement them in interface.c for sandbox.
|
||||
*/
|
||||
|
||||
/* Little-endian bit operations */
|
||||
#define __set_bit_le(nr, addr) ((void)(nr), (void)(addr))
|
||||
#define test_bit_le(nr, addr) ({ (void)(nr); (void)(addr); 0; })
|
||||
/* Little-endian bit operations - use arch-provided find_next_zero_bit */
|
||||
#define find_next_zero_bit_le(addr, size, offset) \
|
||||
({ (void)(addr); (void)(size); (offset); })
|
||||
#define __test_and_clear_bit_le(nr, addr) ({ (void)(nr); (void)(addr); 0; })
|
||||
#define __test_and_set_bit_le(nr, addr) ({ (void)(nr); (void)(addr); 0; })
|
||||
find_next_zero_bit((void *)addr, size, offset)
|
||||
#define __set_bit_le(nr, addr) set_bit(nr, addr)
|
||||
#define test_bit_le(nr, addr) test_bit(nr, addr)
|
||||
#define __test_and_clear_bit_le(nr, addr) \
|
||||
({ int __old = test_bit(nr, addr); clear_bit(nr, addr); __old; })
|
||||
#define __test_and_set_bit_le(nr, addr) \
|
||||
({ int __old = test_bit(nr, addr); set_bit(nr, addr); __old; })
|
||||
|
||||
/* KUNIT stub */
|
||||
#define KUNIT_STATIC_STUB_REDIRECT(...) do { } while (0)
|
||||
@@ -366,14 +368,15 @@ struct buffer_head *sb_getblk(struct super_block *sb, sector_t block);
|
||||
#define percpu_counter_add(fbc, amount) ((fbc)->count += (amount))
|
||||
#define percpu_counter_inc(fbc) ((fbc)->count++)
|
||||
#define percpu_counter_dec(fbc) ((fbc)->count--)
|
||||
#define percpu_counter_initialized(fbc) (1)
|
||||
#define percpu_counter_initialized(fbc) ((fbc)->initialized)
|
||||
|
||||
/* Group permission - stub */
|
||||
#define in_group_p(gid) (0)
|
||||
|
||||
/* Quota operations - stubs (only define if quotaops.h not included) */
|
||||
#ifndef _LINUX_QUOTAOPS_H
|
||||
#define dquot_alloc_block_nofail(inode, nr) ({ (void)(inode); (void)(nr); 0; })
|
||||
#define dquot_alloc_block_nofail(inode, nr) \
|
||||
({ (inode)->i_blocks += (nr) << ((inode)->i_blkbits - 9); 0; })
|
||||
#define dquot_initialize(inode) ({ (void)(inode); 0; })
|
||||
#define dquot_free_inode(inode) do { (void)(inode); } while (0)
|
||||
#define dquot_alloc_inode(inode) ({ (void)(inode); 0; })
|
||||
@@ -397,10 +400,10 @@ struct buffer_head *sb_getblk(struct super_block *sb, sector_t block);
|
||||
|
||||
/* Buffer cache operations */
|
||||
#define sb_find_get_block(sb, block) ((struct buffer_head *)NULL)
|
||||
#define sync_dirty_buffer(bh) ({ (void)(bh); 0; })
|
||||
#define sync_dirty_buffer(bh) submit_bh(REQ_OP_WRITE, bh)
|
||||
|
||||
/* Time functions */
|
||||
#define ktime_get_real_seconds() (0)
|
||||
/* Time functions - use boot-relative time for timestamps */
|
||||
#define ktime_get_real_seconds() (get_timer(0) / 1000)
|
||||
#define time_before32(a, b) (0)
|
||||
|
||||
/* Inode operations - iget_locked and new_inode are in interface.c */
|
||||
@@ -408,7 +411,7 @@ extern struct inode *new_inode(struct super_block *sb);
|
||||
#define i_uid_write(inode, uid) do { } while (0)
|
||||
#define i_gid_write(inode, gid) do { } while (0)
|
||||
#define inode_fsuid_set(inode, idmap) do { } while (0)
|
||||
#define inode_init_owner(idmap, i, dir, mode) do { } while (0)
|
||||
#define inode_init_owner(idmap, i, dir, mode) do { (i)->i_mode = (mode); } while (0)
|
||||
#define insert_inode_locked(inode) (0)
|
||||
#define unlock_new_inode(inode) do { } while (0)
|
||||
#define clear_nlink(inode) do { } while (0)
|
||||
@@ -488,8 +491,8 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
|
||||
/* RCU barrier - stub */
|
||||
#define rcu_barrier() do { } while (0)
|
||||
|
||||
/* inode/dentry operations - stubs */
|
||||
#define iput(inode) do { } while (0)
|
||||
/* inode/dentry operations */
|
||||
void iput(struct inode *inode);
|
||||
|
||||
/* current task - from linux/sched.h */
|
||||
#include <linux/sched.h>
|
||||
@@ -1213,7 +1216,8 @@ static inline ktime_t ktime_add_ns(ktime_t kt, s64 ns)
|
||||
#define write_trylock(lock) ({ (void)(lock); 1; })
|
||||
|
||||
/* percpu counter init/destroy */
|
||||
#define percpu_counter_init(fbc, val, gfp) ({ (fbc)->count = (val); 0; })
|
||||
#define percpu_counter_init(fbc, val, gfp) \
|
||||
({ (fbc)->count = (val); (fbc)->initialized = true; 0; })
|
||||
#define percpu_counter_destroy(fbc) do { } while (0)
|
||||
|
||||
/* ratelimit macros */
|
||||
@@ -1245,16 +1249,21 @@ struct folio_batch {
|
||||
|
||||
/* folio operations - stubs */
|
||||
#define folio_mark_dirty(f) do { (void)(f); } while (0)
|
||||
#define offset_in_folio(f, p) ({ (void)(f); (unsigned int)((unsigned long)(p) & (PAGE_SIZE - 1)); })
|
||||
/*
|
||||
* offset_in_folio - calculate offset of pointer within folio's data
|
||||
* In Linux this uses page alignment, but in U-Boot we use the folio's
|
||||
* actual data pointer since our buffers are malloc'd.
|
||||
*/
|
||||
#define offset_in_folio(f, p) ((f) ? (unsigned int)((uintptr_t)(p) - (uintptr_t)(f)->data) : 0U)
|
||||
#define folio_buffers(f) ({ (void)(f); (struct buffer_head *)NULL; })
|
||||
#define virt_to_folio(p) ({ (void)(p); (struct folio *)NULL; })
|
||||
#define folio_set_bh(bh, f, off) do { (void)(bh); (void)(f); (void)(off); } while (0)
|
||||
#define folio_set_bh(bh, f, off) do { if ((bh) && (f)) { (bh)->b_folio = (f); (bh)->b_data = (char *)(f)->data + (off); } } while (0)
|
||||
#define memcpy_from_folio(dst, f, off, len) do { (void)(dst); (void)(f); (void)(off); (void)(len); } while (0)
|
||||
#define folio_test_uptodate(f) ({ (void)(f); 1; })
|
||||
#define folio_pos(f) ({ (void)(f); 0LL; })
|
||||
#define folio_size(f) ({ (void)(f); PAGE_SIZE; })
|
||||
#define folio_unlock(f) do { (void)(f); } while (0)
|
||||
#define folio_put(f) do { (void)(f); } while (0)
|
||||
/* folio_put and folio_get are implemented in support.c */
|
||||
#define folio_lock(f) do { (void)(f); } while (0)
|
||||
#define folio_batch_init(fb) do { (fb)->nr = 0; } while (0)
|
||||
#define filemap_get_folios(m, i, e, fb) ({ (void)(m); (void)(i); (void)(e); (void)(fb); 0U; })
|
||||
@@ -1355,7 +1364,7 @@ static inline int generic_error_remove_folio(struct address_space *mapping,
|
||||
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
|
||||
|
||||
/* kmap/kunmap stubs for inline.c */
|
||||
#define kmap_local_folio(folio, off) ({ (void)(folio); (void)(off); (void *)NULL; })
|
||||
#define kmap_local_folio(folio, off) ((folio) ? (char *)(folio)->data + (off) : NULL)
|
||||
#define kunmap_local(addr) do { (void)(addr); } while (0)
|
||||
|
||||
/* Folio zeroing stubs for inline.c */
|
||||
@@ -1365,13 +1374,12 @@ static inline int generic_error_remove_folio(struct address_space *mapping,
|
||||
/* mapping_gfp_mask stub */
|
||||
#define mapping_gfp_mask(m) ({ (void)(m); GFP_KERNEL; })
|
||||
|
||||
/* __filemap_get_folio stub */
|
||||
static inline struct folio *__filemap_get_folio(struct address_space *mapping,
|
||||
pgoff_t index, unsigned int fgp_flags,
|
||||
gfp_t gfp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
/* Folio operations - implemented in support.c */
|
||||
struct folio *__filemap_get_folio(struct address_space *mapping,
|
||||
pgoff_t index, unsigned int fgp_flags,
|
||||
gfp_t gfp);
|
||||
void folio_put(struct folio *folio);
|
||||
void folio_get(struct folio *folio);
|
||||
|
||||
/* projid_t - project ID type */
|
||||
typedef unsigned int projid_t;
|
||||
@@ -1543,7 +1551,9 @@ static inline char *d_path(const struct path *path, char *buf, int buflen)
|
||||
/* Buffer operations - additional */
|
||||
#define getblk_unmovable(bdev, block, size) sb_getblk(bdev->bd_super, block)
|
||||
#define create_empty_buffers(f, s, flags) ({ (void)(f); (void)(s); (void)(flags); (struct buffer_head *)NULL; })
|
||||
#define bh_offset(bh) (0UL)
|
||||
/* bh_offset returns offset of b_data within the folio */
|
||||
#define bh_offset(bh) ((bh)->b_folio ? \
|
||||
(unsigned long)((char *)(bh)->b_data - (char *)(bh)->b_folio->data) : 0UL)
|
||||
#define block_invalidate_folio(f, o, l) do { } while (0)
|
||||
#define block_write_end(pos, len, copied, folio) ({ (void)(pos); (void)(len); (void)(folio); (copied); })
|
||||
#define block_dirty_folio(m, f) ({ (void)(m); (void)(f); false; })
|
||||
@@ -2067,8 +2077,9 @@ struct fs_context {
|
||||
bool silent;
|
||||
};
|
||||
|
||||
/* ext4 superblock initialisation */
|
||||
/* ext4 superblock initialisation and commit */
|
||||
int ext4_fill_super(struct super_block *sb, struct fs_context *fc);
|
||||
int ext4_commit_super(struct super_block *sb);
|
||||
|
||||
/* fs_parameter stubs */
|
||||
struct fs_parameter {
|
||||
@@ -2173,8 +2184,8 @@ struct blk_holder_ops {
|
||||
};
|
||||
static const struct blk_holder_ops fs_holder_ops;
|
||||
|
||||
/* end_buffer_write_sync */
|
||||
#define end_buffer_write_sync NULL
|
||||
/* end_buffer_write_sync - implemented in support.c */
|
||||
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
|
||||
|
||||
/* File system management time flag */
|
||||
#define FS_MGTIME 0
|
||||
@@ -2540,8 +2551,7 @@ static inline unsigned long ext4_find_next_bit_le(const void *addr,
|
||||
/* WARN_RATELIMIT - just evaluate condition, no warning in U-Boot */
|
||||
#define WARN_RATELIMIT(condition, ...) (condition)
|
||||
|
||||
/* folio_get - increment folio refcount (no-op in U-Boot) */
|
||||
#define folio_get(f) do { (void)(f); } while (0)
|
||||
/* folio_get - now implemented in support.c */
|
||||
|
||||
/* array_index_nospec - bounds checking without speculation (no-op in U-Boot) */
|
||||
#define array_index_nospec(index, size) (index)
|
||||
@@ -2879,7 +2889,9 @@ void free_buffer_head(struct buffer_head *bh);
|
||||
/* ext4l support functions (support.c) */
|
||||
void ext4l_crc32c_init(void);
|
||||
void bh_cache_clear(void);
|
||||
int bh_cache_sync(void);
|
||||
int ext4l_read_block(sector_t block, size_t size, void *buffer);
|
||||
int ext4l_write_block(sector_t block, size_t size, void *buffer);
|
||||
|
||||
/* ext4l interface functions (interface.c) */
|
||||
struct blk_desc *ext4l_get_blk_dev(void);
|
||||
@@ -2914,7 +2926,8 @@ struct membuf *ext4l_get_msg_buf(void);
|
||||
|
||||
/* JBD2 journal.c stubs */
|
||||
struct buffer_head *alloc_buffer_head(gfp_t gfp_mask);
|
||||
#define __getblk(bdev, block, size) ({ (void)(bdev); (void)(block); (void)(size); (struct buffer_head *)NULL; })
|
||||
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
|
||||
unsigned int size);
|
||||
int bmap(struct inode *inode, sector_t *block);
|
||||
#define trace_jbd2_update_log_tail(j, t, b, f) \
|
||||
do { (void)(j); (void)(t); (void)(b); (void)(f); } while (0)
|
||||
|
||||
@@ -77,26 +77,27 @@ typedef struct journal_s journal_t;
|
||||
|
||||
/*
|
||||
* Bit operations - sandbox declares these extern but doesn't implement them.
|
||||
* These work on bitmaps where nr is the absolute bit number.
|
||||
*/
|
||||
void set_bit(int nr, void *addr)
|
||||
{
|
||||
unsigned long *p = (unsigned long *)addr;
|
||||
unsigned long *p = (unsigned long *)addr + (nr / BITS_PER_LONG);
|
||||
|
||||
*p |= (1UL << nr);
|
||||
*p |= (1UL << (nr % BITS_PER_LONG));
|
||||
}
|
||||
|
||||
void clear_bit(int nr, void *addr)
|
||||
{
|
||||
unsigned long *p = (unsigned long *)addr;
|
||||
unsigned long *p = (unsigned long *)addr + (nr / BITS_PER_LONG);
|
||||
|
||||
*p &= ~(1UL << nr);
|
||||
*p &= ~(1UL << (nr % BITS_PER_LONG));
|
||||
}
|
||||
|
||||
void change_bit(int nr, void *addr)
|
||||
{
|
||||
unsigned long *p = (unsigned long *)addr;
|
||||
unsigned long *p = (unsigned long *)addr + (nr / BITS_PER_LONG);
|
||||
|
||||
*p ^= (1UL << nr);
|
||||
*p ^= (1UL << (nr % BITS_PER_LONG));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -587,6 +588,29 @@ struct dentry *d_make_root(struct inode *inode)
|
||||
return de;
|
||||
}
|
||||
|
||||
/**
|
||||
* iput() - Release a reference to an inode
|
||||
* @inode: Inode to release
|
||||
*
|
||||
* Decrements the inode reference count. When the reference count reaches
|
||||
* zero and the inode has no links, the inode is evicted (freed).
|
||||
*/
|
||||
void iput(struct inode *inode)
|
||||
{
|
||||
if (!inode)
|
||||
return;
|
||||
|
||||
if (atomic_dec_and_test(&inode->i_count)) {
|
||||
/* Last reference - check if inode should be evicted */
|
||||
if (inode->i_nlink == 0 && inode->i_sb &&
|
||||
inode->i_sb->s_op && inode->i_sb->s_op->evict_inode) {
|
||||
inode->i_sb->s_op->evict_inode(inode);
|
||||
/* Sync dirty buffers after eviction */
|
||||
bh_cache_sync();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* percpu init rwsem */
|
||||
int percpu_init_rwsem(struct percpu_rw_semaphore *sem)
|
||||
{
|
||||
@@ -669,11 +693,22 @@ void dquot_free_space_nodirty(struct inode *inode, loff_t size)
|
||||
|
||||
int dquot_alloc_block(struct inode *inode, loff_t nr)
|
||||
{
|
||||
/*
|
||||
* Update i_blocks to reflect the allocated blocks.
|
||||
* i_blocks is in 512-byte units, so convert from fs blocks.
|
||||
*/
|
||||
inode->i_blocks += nr << (inode->i_blkbits - 9);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dquot_free_block(struct inode *inode, loff_t nr)
|
||||
{
|
||||
/*
|
||||
* Update i_blocks to reflect the freed blocks.
|
||||
* i_blocks is in 512-byte units, so convert from fs blocks.
|
||||
*/
|
||||
inode->i_blocks -= nr << (inode->i_blkbits - 9);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -222,6 +222,29 @@ static void bh_cache_insert(struct buffer_head *bh)
|
||||
*
|
||||
* Called on unmount to free all cached buffers.
|
||||
*/
|
||||
/**
|
||||
* bh_clear_stale_jbd() - Clear stale journal_head from buffer_head
|
||||
* @bh: buffer_head to check
|
||||
*
|
||||
* Check if the buffer still has journal_head attached. This should not happen
|
||||
* if the journal was properly destroyed, but warn if it does to help debugging.
|
||||
* Clear the JBD flag and b_private to prevent issues with subsequent mounts.
|
||||
*/
|
||||
static void bh_clear_stale_jbd(struct buffer_head *bh)
|
||||
{
|
||||
if (buffer_jbd(bh)) {
|
||||
log_err("bh %p block %llu still has JBD (b_private %p)\n",
|
||||
bh, (unsigned long long)bh->b_blocknr, bh->b_private);
|
||||
/*
|
||||
* Clear the JBD flag and b_private to prevent issues.
|
||||
* The journal_head itself will be freed when the
|
||||
* journal_head cache is destroyed.
|
||||
*/
|
||||
clear_buffer_jbd(bh);
|
||||
bh->b_private = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void bh_cache_clear(void)
|
||||
{
|
||||
int i;
|
||||
@@ -231,9 +254,12 @@ void bh_cache_clear(void)
|
||||
for (entry = bh_cache[i]; entry; entry = next) {
|
||||
next = entry->next;
|
||||
if (entry->bh) {
|
||||
struct buffer_head *bh = entry->bh;
|
||||
|
||||
bh_clear_stale_jbd(bh);
|
||||
/* Release the cache's reference */
|
||||
if (atomic_dec_and_test(&entry->bh->b_count))
|
||||
free_buffer_head(entry->bh);
|
||||
if (atomic_dec_and_test(&bh->b_count))
|
||||
free_buffer_head(bh);
|
||||
}
|
||||
free(entry);
|
||||
}
|
||||
@@ -241,6 +267,34 @@ void bh_cache_clear(void)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* bh_cache_sync() - Sync all dirty buffers to disk
|
||||
*
|
||||
* U-Boot doesn't have a journal thread, so we need to manually sync
|
||||
* all dirty buffers after write operations.
|
||||
*
|
||||
* Return: 0 on success, negative on first error
|
||||
*/
|
||||
int bh_cache_sync(void)
|
||||
{
|
||||
int i, ret = 0;
|
||||
struct bh_cache_entry *entry;
|
||||
|
||||
for (i = 0; i < BH_CACHE_SIZE; i++) {
|
||||
for (entry = bh_cache[i]; entry; entry = entry->next) {
|
||||
if (entry->bh && buffer_dirty(entry->bh)) {
|
||||
int err = ext4l_write_block(entry->bh->b_blocknr,
|
||||
entry->bh->b_size,
|
||||
entry->bh->b_data);
|
||||
if (err && !ret)
|
||||
ret = err;
|
||||
clear_buffer_dirty(entry->bh);
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* alloc_buffer_head() - Allocate a buffer_head structure
|
||||
* @gfp_mask: Allocation flags (ignored in U-Boot)
|
||||
@@ -302,19 +356,25 @@ static struct buffer_head *alloc_buffer_head_with_data(size_t size)
|
||||
* @bh: Buffer head to free
|
||||
*
|
||||
* Only free b_data if BH_OwnsData is set. Shadow buffers created by
|
||||
* jbd2_journal_write_metadata_buffer() share b_data with the original
|
||||
* buffer and should not free it.
|
||||
* jbd2_journal_write_metadata_buffer() share b_data/b_folio with the original
|
||||
* buffer and should not free them. Shadow buffers are identified by having
|
||||
* b_private set to point to the original buffer.
|
||||
*/
|
||||
void free_buffer_head(struct buffer_head *bh)
|
||||
{
|
||||
if (!bh)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Shadow buffers (b_private != NULL) share their folio with the
|
||||
* original buffer. Don't free the shared folio.
|
||||
*/
|
||||
if (!bh->b_private && bh->b_folio)
|
||||
free(bh->b_folio);
|
||||
|
||||
/* Only free b_data if this buffer owns it */
|
||||
if (bh->b_data && test_bit(BH_OwnsData, &bh->b_state))
|
||||
free(bh->b_data);
|
||||
if (bh->b_folio)
|
||||
free(bh->b_folio);
|
||||
free(bh);
|
||||
}
|
||||
|
||||
@@ -425,6 +485,50 @@ struct buffer_head *sb_getblk(struct super_block *sb, sector_t block)
|
||||
return bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* __getblk() - Get a buffer for a given block device
|
||||
* @bdev: Block device
|
||||
* @block: Block number
|
||||
* @size: Block size
|
||||
* Return: Buffer head or NULL on error
|
||||
*
|
||||
* Similar to sb_getblk but takes a block device instead of superblock.
|
||||
* Used by the journal to allocate descriptor buffers.
|
||||
*/
|
||||
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
|
||||
unsigned int size)
|
||||
{
|
||||
struct buffer_head *bh;
|
||||
|
||||
if (!bdev || !size)
|
||||
return NULL;
|
||||
|
||||
/* Check cache first - must match block number AND size */
|
||||
bh = bh_cache_lookup(block, size);
|
||||
if (bh)
|
||||
return bh;
|
||||
|
||||
/* Allocate new buffer */
|
||||
bh = alloc_buffer_head_with_data(size);
|
||||
if (!bh)
|
||||
return NULL;
|
||||
|
||||
bh->b_blocknr = block;
|
||||
bh->b_bdev = bdev;
|
||||
bh->b_size = size;
|
||||
|
||||
/* Mark buffer as having a valid disk mapping */
|
||||
set_buffer_mapped(bh);
|
||||
|
||||
/* Don't read - just allocate with zeroed data */
|
||||
memset(bh->b_data, '\0', bh->b_size);
|
||||
|
||||
/* Add to cache */
|
||||
bh_cache_insert(bh);
|
||||
|
||||
return bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* sb_bread() - Read a block via super_block
|
||||
* @sb: Super block
|
||||
@@ -477,12 +581,17 @@ void brelse(struct buffer_head *bh)
|
||||
}
|
||||
|
||||
/**
|
||||
* __brelse() - Release a buffer_head (alternate API)
|
||||
* __brelse() - Release a buffer_head reference without freeing
|
||||
* @bh: Buffer head to release
|
||||
*
|
||||
* Unlike brelse(), this only decrements the reference count without
|
||||
* freeing the buffer when count reaches zero. Used when caller will
|
||||
* explicitly free with free_buffer_head() afterward.
|
||||
*/
|
||||
void __brelse(struct buffer_head *bh)
|
||||
{
|
||||
brelse(bh);
|
||||
if (bh)
|
||||
atomic_dec(&bh->b_count);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -556,6 +665,23 @@ struct buffer_head *__bread(struct block_device *bdev, sector_t block,
|
||||
return bh;
|
||||
}
|
||||
|
||||
/**
|
||||
* end_buffer_write_sync() - Completion handler for synchronous buffer writes
|
||||
* @bh: Buffer head that completed I/O
|
||||
* @uptodate: 1 if I/O succeeded, 0 if failed
|
||||
*
|
||||
* This callback is invoked after a buffer write completes. It sets the
|
||||
* buffer's uptodate state based on the result and unlocks the buffer.
|
||||
*/
|
||||
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
|
||||
{
|
||||
if (uptodate)
|
||||
set_buffer_uptodate(bh);
|
||||
else
|
||||
clear_buffer_uptodate(bh);
|
||||
unlock_buffer(bh);
|
||||
}
|
||||
|
||||
/**
|
||||
* submit_bh() - Submit a buffer_head for I/O
|
||||
* @op: Operation (REQ_OP_READ, REQ_OP_WRITE, etc.)
|
||||
@@ -564,26 +690,38 @@ struct buffer_head *__bread(struct block_device *bdev, sector_t block,
|
||||
*/
|
||||
int submit_bh(int op, struct buffer_head *bh)
|
||||
{
|
||||
int ret;
|
||||
int ret = 0;
|
||||
int op_type = op & REQ_OP_MASK; /* Mask out flags, keep operation type */
|
||||
int uptodate;
|
||||
|
||||
if (op_type == REQ_OP_READ) {
|
||||
ret = ext4l_read_block(bh->b_blocknr, bh->b_size, bh->b_data);
|
||||
if (ret) {
|
||||
clear_buffer_uptodate(bh);
|
||||
return ret;
|
||||
uptodate = 0;
|
||||
} else {
|
||||
set_buffer_uptodate(bh);
|
||||
uptodate = 1;
|
||||
}
|
||||
set_buffer_uptodate(bh);
|
||||
} else if (op_type == REQ_OP_WRITE) {
|
||||
ret = ext4l_write_block(bh->b_blocknr, bh->b_size, bh->b_data);
|
||||
if (ret) {
|
||||
clear_buffer_uptodate(bh);
|
||||
return ret;
|
||||
set_buffer_write_io_error(bh);
|
||||
uptodate = 0;
|
||||
} else {
|
||||
clear_buffer_write_io_error(bh);
|
||||
uptodate = 1;
|
||||
}
|
||||
/* Mark buffer as clean (not dirty) after write */
|
||||
} else {
|
||||
uptodate = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
/* Call b_end_io callback if set - U-Boot does sync I/O */
|
||||
if (bh->b_end_io)
|
||||
bh->b_end_io(bh, uptodate);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -600,3 +738,83 @@ int bh_read(struct buffer_head *bh, int flags)
|
||||
submit_bh(REQ_OP_READ | flags, bh);
|
||||
return buffer_uptodate(bh) ? 0 : -EIO;
|
||||
}
|
||||
|
||||
/**
|
||||
* __filemap_get_folio() - Get or create a folio for a mapping
|
||||
* @mapping: The address_space to search
|
||||
* @index: The page index
|
||||
* @fgp_flags: Flags (FGP_CREAT to create if not found)
|
||||
* @gfp: Memory allocation flags
|
||||
* Return: Folio pointer or ERR_PTR on error
|
||||
*/
|
||||
struct folio *__filemap_get_folio(struct address_space *mapping,
|
||||
pgoff_t index, unsigned int fgp_flags,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct folio *folio;
|
||||
int i;
|
||||
|
||||
/* Search for existing folio in cache */
|
||||
if (mapping) {
|
||||
for (i = 0; i < mapping->folio_cache_count; i++) {
|
||||
folio = mapping->folio_cache[i];
|
||||
if (folio && folio->index == index) {
|
||||
/* Found existing folio, bump refcount */
|
||||
folio->_refcount++;
|
||||
return folio;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If not creating, return error */
|
||||
if (!(fgp_flags & FGP_CREAT))
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
/* Create new folio */
|
||||
folio = kzalloc(sizeof(struct folio), gfp);
|
||||
if (!folio)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
folio->data = kzalloc(PAGE_SIZE, gfp);
|
||||
if (!folio->data) {
|
||||
kfree(folio);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
folio->index = index;
|
||||
folio->mapping = mapping;
|
||||
folio->_refcount = 1;
|
||||
|
||||
/* Add to cache if there's room */
|
||||
if (mapping && mapping->folio_cache_count < FOLIO_CACHE_MAX) {
|
||||
mapping->folio_cache[mapping->folio_cache_count++] = folio;
|
||||
/* Extra ref for cache */
|
||||
folio->_refcount++;
|
||||
}
|
||||
|
||||
return folio;
|
||||
}
|
||||
|
||||
/**
|
||||
* folio_put() - Release a reference to a folio
|
||||
* @folio: The folio to release
|
||||
*/
|
||||
void folio_put(struct folio *folio)
|
||||
{
|
||||
if (!folio)
|
||||
return;
|
||||
if (--folio->_refcount > 0)
|
||||
return;
|
||||
kfree(folio->data);
|
||||
kfree(folio);
|
||||
}
|
||||
|
||||
/**
|
||||
* folio_get() - Acquire a reference to a folio
|
||||
* @folio: The folio to reference
|
||||
*/
|
||||
void folio_get(struct folio *folio)
|
||||
{
|
||||
if (folio)
|
||||
folio->_refcount++;
|
||||
}
|
||||
|
||||
@@ -194,6 +194,19 @@ static inline unsigned long __ffs64(u64 word)
|
||||
return __ffs((unsigned long)word);
|
||||
}
|
||||
|
||||
/**
|
||||
* fns - find N'th set bit in a word
|
||||
* @word: The word to search
|
||||
* @n: Bit to find
|
||||
*/
|
||||
static inline unsigned int fns(unsigned long word, unsigned int n)
|
||||
{
|
||||
while (word && n--)
|
||||
word &= word - 1;
|
||||
|
||||
return word ? __ffs(word) : BITS_PER_LONG;
|
||||
}
|
||||
|
||||
/**
|
||||
* __set_bit - Set a bit in memory
|
||||
* @nr: the bit to set
|
||||
|
||||
176
include/linux/find.h
Normal file
176
include/linux/find.h
Normal file
@@ -0,0 +1,176 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __LINUX_FIND_H_
|
||||
#define __LINUX_FIND_H_
|
||||
|
||||
#include <linux/bitops.h>
|
||||
|
||||
unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
|
||||
unsigned long start);
|
||||
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
|
||||
unsigned long start);
|
||||
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
|
||||
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
|
||||
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
|
||||
|
||||
#ifndef find_next_bit
|
||||
/**
|
||||
* find_next_bit - find the next set bit in a memory region
|
||||
* @addr: The address to base the search on
|
||||
* @size: The bitmap size in bits
|
||||
* @offset: The bitnumber to start searching at
|
||||
*
|
||||
* Returns the bit number for the next set bit
|
||||
* If no bits are set, returns @size.
|
||||
*/
|
||||
static __always_inline
|
||||
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
|
||||
unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = *addr & GENMASK(size - 1, offset);
|
||||
return val ? __ffs(val) : size;
|
||||
}
|
||||
|
||||
return _find_next_bit(addr, size, offset);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_next_zero_bit
|
||||
/**
|
||||
* find_next_zero_bit - find the next cleared bit in a memory region
|
||||
* @addr: The address to base the search on
|
||||
* @size: The bitmap size in bits
|
||||
* @offset: The bitnumber to start searching at
|
||||
*
|
||||
* Returns the bit number of the next zero bit
|
||||
* If no bits are zero, returns @size.
|
||||
*/
|
||||
static __always_inline
|
||||
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
|
||||
unsigned long offset)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val;
|
||||
|
||||
if (unlikely(offset >= size))
|
||||
return size;
|
||||
|
||||
val = *addr | ~GENMASK(size - 1, offset);
|
||||
return val == ~0UL ? size : ffz(val);
|
||||
}
|
||||
|
||||
return _find_next_zero_bit(addr, size, offset);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_first_bit
|
||||
/**
|
||||
* find_first_bit - find the first set bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
* @size: The maximum number of bits to search
|
||||
*
|
||||
* Returns the bit number of the first set bit.
|
||||
* If no bits are set, returns @size.
|
||||
*/
|
||||
static __always_inline
|
||||
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr & GENMASK(size - 1, 0);
|
||||
|
||||
return val ? __ffs(val) : size;
|
||||
}
|
||||
|
||||
return _find_first_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_first_zero_bit
|
||||
/**
|
||||
* find_first_zero_bit - find the first cleared bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
* @size: The maximum number of bits to search
|
||||
*
|
||||
* Returns the bit number of the first cleared bit.
|
||||
* If no bits are zero, returns @size.
|
||||
*/
|
||||
static __always_inline
|
||||
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr | ~GENMASK(size - 1, 0);
|
||||
|
||||
return val == ~0UL ? size : ffz(val);
|
||||
}
|
||||
|
||||
return _find_first_zero_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef find_last_bit
|
||||
/**
|
||||
* find_last_bit - find the last set bit in a memory region
|
||||
* @addr: The address to start the search at
|
||||
* @size: The number of bits to search
|
||||
*
|
||||
* Returns the bit number of the last set bit, or size.
|
||||
*/
|
||||
static __always_inline
|
||||
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
|
||||
{
|
||||
if (small_const_nbits(size)) {
|
||||
unsigned long val = *addr & GENMASK(size - 1, 0);
|
||||
|
||||
return val ? __fls(val) : size;
|
||||
}
|
||||
|
||||
return _find_last_bit(addr, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(__LITTLE_ENDIAN)
|
||||
|
||||
static __always_inline
|
||||
unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
|
||||
{
|
||||
return find_next_zero_bit(addr, size, offset);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
|
||||
{
|
||||
return find_next_bit(addr, size, offset);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
|
||||
{
|
||||
return find_first_zero_bit(addr, size);
|
||||
}
|
||||
|
||||
#else
|
||||
#error "Please fix <asm/byteorder.h>"
|
||||
#endif
|
||||
|
||||
#define for_each_set_bit(bit, addr, size) \
|
||||
for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||
|
||||
/* same as for_each_set_bit() but use bit as value to start with */
|
||||
#define for_each_set_bit_from(bit, addr, size) \
|
||||
for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||
|
||||
#define for_each_clear_bit(bit, addr, size) \
|
||||
for ((bit) = 0; \
|
||||
(bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
|
||||
(bit)++)
|
||||
|
||||
/* same as for_each_clear_bit() but use bit as value to start with */
|
||||
#define for_each_clear_bit_from(bit, addr, size) \
|
||||
for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
|
||||
|
||||
#endif /*__LINUX_FIND_H_ */
|
||||
@@ -84,15 +84,30 @@ static inline void *krealloc(const void *p, size_t new_size, gfp_t flags)
|
||||
|
||||
void *kmemdup(const void *src, size_t len, gfp_t gfp);
|
||||
|
||||
/* kmem_cache stubs */
|
||||
/* kmem_cache implementation / stubs */
|
||||
struct kmem_cache {
|
||||
int sz;
|
||||
};
|
||||
|
||||
struct kmem_cache *get_mem(int element_sz);
|
||||
#define kmem_cache_create(a, sz, c, d, e) ({ (void)(a); (void)(e); get_mem(sz); })
|
||||
#define kmem_cache_create(a, sz, c, d, e) get_mem(sz)
|
||||
void *kmem_cache_alloc(struct kmem_cache *obj, gfp_t flag);
|
||||
|
||||
#if CONFIG_IS_ENABLED(LIB_KMEM_CACHE)
|
||||
void kmem_cache_free(struct kmem_cache *cachep, void *obj);
|
||||
void kmem_cache_destroy(struct kmem_cache *cachep);
|
||||
#else
|
||||
/*
 * Stub used when LIB_KMEM_CACHE is disabled: return an object obtained
 * from kmem_cache_alloc() straight to the heap.  The cache argument is
 * unused here.
 */
static inline void kmem_cache_free(struct kmem_cache *cache, void *ptr)
{
	free(ptr);
}
|
||||
|
||||
/*
 * Stub used when LIB_KMEM_CACHE is disabled: release the cache
 * descriptor itself (presumably allocated by get_mem() via
 * kmem_cache_create() — verify against that implementation).
 */
static inline void kmem_cache_destroy(struct kmem_cache *cache)
{
	free(cache);
}
|
||||
#endif
|
||||
|
||||
static inline void *kmem_cache_zalloc(struct kmem_cache *obj, gfp_t flags)
|
||||
{
|
||||
void *ret = kmem_cache_alloc(obj, flags);
|
||||
@@ -102,14 +117,4 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *obj, gfp_t flags)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void kmem_cache_free(struct kmem_cache *cachep, void *obj)
|
||||
{
|
||||
free(obj);
|
||||
}
|
||||
|
||||
static inline void kmem_cache_destroy(struct kmem_cache *cachep)
|
||||
{
|
||||
free(cachep);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_SLAB_H */
|
||||
|
||||
@@ -396,6 +396,14 @@ config TPL_TINY_MEMSET
|
||||
config RBTREE
|
||||
bool
|
||||
|
||||
config LIB_KMEM_CACHE
|
||||
bool "Enable full kmem_cache implementation"
|
||||
help
|
||||
	  Provide a proper kmem_cache implementation in lib/kmem_cache.c
|
||||
that tracks allocated objects. This is needed by subsystems like
|
||||
ext4 that require cache management. When disabled, simple inline
|
||||
stubs are used instead.
|
||||
|
||||
config BITREVERSE
|
||||
bool "Bit reverse library from Linux"
|
||||
|
||||
|
||||
@@ -58,6 +58,7 @@ obj-$(CONFIG_PHYSMEM) += physmem.o
|
||||
obj-y += rc4.o
|
||||
obj-$(CONFIG_RBTREE) += rbtree.o
|
||||
obj-$(CONFIG_BITREVERSE) += bitrev.o
|
||||
obj-$(CONFIG_SANDBOX) += find_bit.o
|
||||
obj-y += list_sort.o
|
||||
endif
|
||||
|
||||
@@ -135,6 +136,7 @@ obj-$(CONFIG_$(PHASE_)OF_LIBFDT) += fdtdec.o fdtdec_common.o fdt_print.o
|
||||
obj-y += hang.o
|
||||
obj-y += linux_compat.o
|
||||
obj-y += linux_string.o
|
||||
obj-$(CONFIG_$(PHASE_)LIB_KMEM_CACHE) += kmem_cache.o
|
||||
obj-$(CONFIG_$(PHASE_)LMB) += lmb.o
|
||||
obj-y += membuf.o
|
||||
obj-$(CONFIG_REGEX) += slre.o
|
||||
|
||||
142
lib/find_bit.c
Normal file
142
lib/find_bit.c
Normal file
@@ -0,0 +1,142 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/* bit search implementation
|
||||
*
|
||||
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* Copyright (C) 2008 IBM Corporation
|
||||
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
|
||||
* (Inspired by David Howell's find_next_bit implementation)
|
||||
*
|
||||
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
|
||||
* size and improve performance, 2015.
|
||||
*/
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
/*
 * Common helper for find_bit() function family
 * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
 * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
 * @size: The bitmap size in bits
 *
 * Evaluates (as a GNU statement expression) to the number of the first
 * qualifying bit, or @size if none.  FETCH may reference the local
 * variable 'idx' (the current word index), as the callers below do
 * with addr[idx].
 */
#define FIND_FIRST_BIT(FETCH, MUNGE, size)					\
({										\
	unsigned long idx, val, sz = (size);					\
										\
	for (idx = 0; idx * BITS_PER_LONG < sz; idx++) {			\
		val = (FETCH);							\
		if (val) {							\
			sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz);	\
			break;							\
		}								\
	}									\
										\
	sz;									\
})
|
||||
|
||||
/*
 * Common helper for find_next_bit() function family
 * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
 * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 *
 * Evaluates to the number of the first qualifying bit at or after
 * @start, or @size if there is none.  Expands to a statement
 * expression containing an 'out:' label, so it can be used at most
 * once per enclosing function.  FETCH may reference the local 'idx'.
 */
#define FIND_NEXT_BIT(FETCH, MUNGE, size, start)				\
({										\
	unsigned long mask, idx, tmp, sz = (size), __start = (start);		\
										\
	if (unlikely(__start >= sz))						\
		goto out;							\
										\
	mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start));				\
	idx = __start / BITS_PER_LONG;						\
										\
	for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) {			\
		if ((idx + 1) * BITS_PER_LONG >= sz)				\
			goto out;						\
		idx++;								\
	}									\
										\
	sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz);			\
out:										\
	sz;									\
})
|
||||
|
||||
#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 *
 * Returns the bit number of the first set bit in the @size-bit bitmap
 * at @addr, or @size if the bitmap is entirely clear.
 */
unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
	return FIND_FIRST_BIT(addr[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_bit);
#endif
|
||||
|
||||
#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 *
 * Each word is inverted (~addr[idx]) so zero bits appear set; the
 * FIND_FIRST_BIT size check clips any bits at or above @size.
 * Returns the bit number, or @size if every bit is set.
 */
unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_zero_bit);
#endif
|
||||
|
||||
#ifndef find_next_bit
/*
 * Find the next set bit at or after @start in the @nbits-bit bitmap
 * at @addr.  Returns the bit number, or @nbits if none is found.
 */
unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
{
	return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_bit);
#endif
|
||||
|
||||
#ifndef find_next_zero_bit
/*
 * Find the next cleared bit at or after @start in the @nbits-bit
 * bitmap at @addr.  Words are inverted so clear bits appear set;
 * returns the bit number, or @nbits if none is found.
 */
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
				  unsigned long start)
{
	return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_zero_bit);
#endif
|
||||
|
||||
#ifndef find_last_bit
/*
 * Find the highest set bit in the @size-bit bitmap at @addr.
 *
 * Scans words from the top down.  The topmost word is masked with
 * BITMAP_LAST_WORD_MASK() so bits at or above @size are ignored;
 * every lower word is taken whole (mask reset to ~0ul).
 * Returns the bit number, or @size if no bit is set (including the
 * @size == 0 case).
 */
unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
{
	if (size) {
		unsigned long val = BITMAP_LAST_WORD_MASK(size);
		unsigned long idx = (size-1) / BITS_PER_LONG;

		do {
			val &= addr[idx];
			if (val)
				return idx * BITS_PER_LONG + __fls(val);

			val = ~0ul;	/* lower words are unmasked */
		} while (idx--);
	}
	return size;
}
EXPORT_SYMBOL(_find_last_bit);
#endif
|
||||
|
||||
/*
 * Wrapper functions matching sandbox's asm/bitops.h declarations.
 * These provide the expected function signatures for sandbox platform.
 *
 * NOTE(review): the int return and unsigned int @size narrow the
 * unsigned long result, so bitmaps larger than INT_MAX bits would be
 * misreported — assumed not to occur; confirm against the sandbox
 * asm/bitops.h callers.
 */
int find_first_zero_bit(void *addr, unsigned int size)
{
	return _find_first_zero_bit(addr, size);
}
EXPORT_SYMBOL(find_first_zero_bit);
|
||||
|
||||
/*
 * int-typed wrapper around _find_next_zero_bit() for sandbox.
 * NOTE(review): a negative @offset or @size would convert to a huge
 * unsigned long; callers are assumed to pass non-negative values —
 * verify against asm/bitops.h users.
 */
int find_next_zero_bit(void *addr, int size, int offset)
{
	return _find_next_zero_bit(addr, size, offset);
}
EXPORT_SYMBOL(find_next_zero_bit);
|
||||
20
lib/kmem_cache.c
Normal file
20
lib/kmem_cache.c
Normal file
@@ -0,0 +1,20 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* kmem_cache implementation for U-Boot
|
||||
*
|
||||
* Copyright 2025 Canonical Ltd
|
||||
* Written by Simon Glass <simon.glass@canonical.com>
|
||||
*/
|
||||
|
||||
#include <malloc.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/*
 * Return an object to its cache by releasing it to the heap.
 * NOTE(review): the LIB_KMEM_CACHE Kconfig help mentions tracking
 * allocated objects, but no bookkeeping is visible here — confirm
 * whether tracking lives elsewhere (e.g. in kmem_cache_alloc()).
 */
void kmem_cache_free(struct kmem_cache *cache, void *ptr)
{
	free(ptr);
}
|
||||
|
||||
/*
 * Tear down a cache by freeing its descriptor (presumably allocated by
 * get_mem() via kmem_cache_create() — verify against that code).
 * Outstanding objects are not released here.
 */
void kmem_cache_destroy(struct kmem_cache *cache)
{
	free(cache);
}
|
||||
Reference in New Issue
Block a user