Merge tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:

 - Remove incorrect __user annotation from struct xattr_args::value

 - Documentation fix: Add missing kernel-doc description for the @isnew
   parameter in ilookup5_nowait() to silence Sphinx warnings

 - Documentation fix: Fix kernel-doc comment for __start_dirop() - the
   function name in the comment was wrong and the @state parameter was
   undocumented

 - Replace dynamic folio_batch allocation with stack allocation in
   iomap_zero_range(). The dynamic allocation was problematic for the
   ext4-on-iomap work (allocation failure wasn't handled properly) and
   triggered lockdep complaints. A new IOMAP_F_FOLIO_BATCH flag now
   controls batch usage instead

 - Re-add #ifdef guards around the PIDFD_GET_<ns-type>_NAMESPACE
   ioctls. When a namespace type is disabled, ns->ops is NULL, which
   causes crashes during inode eviction when the returned fd is closed.
   The ifdefs were removed in a recent simplification but are still
   needed

 - Fix a race where a folio could be unlocked before the trailing
   zeros (for an EOF that falls within the folio) were written

 - Split out a dedicated lease_dispose_list() helper since lease code
   paths always know they're disposing of leases. Removes unnecessary
   runtime flag checks and prepares for upcoming lease_manager
   enhancements

 - Fix userland delegation requests succeeding despite conflicting
   opens. Previously, FL_LAYOUT and FL_DELEG leases bypassed conflict
   checks (a hack for nfsd). A new ->lm_open_conflict() lease_manager
   operation gives userland delegations proper conflict checking while
   nfsd continues its own conflict handling (see the userspace sketch
   after this list)

 - Fix LOOKUP_CACHED path lookups incorrectly falling through to the
   slow path. try_to_unlazy() and try_to_unlazy_next() must always
   fail when LOOKUP_CACHED is set, but that check lived inside
   legitimize_links(), whose calls were later made conditional on
   nd->depth being non-zero. The flag is now checked at the two
   callsites before legitimize_links() is called

 - Fix a potential use-after-free in media_request_alloc(): the fd was
   published before the request's debug string was formatted, so a
   concurrent close of the fd could free the request while it was
   still in use. The fd is now published last

 - Fix mismatched API calls in ecryptfs_mknod(): was calling
   end_removing() instead of end_creating() after
   ecryptfs_start_creating_dentry()

 - Fix dentry reference count leak in ecryptfs_mkdir(): a dget() of the
   lower parent dir was added but never dput()'d, causing BUG during
   lower filesystem unmount due to the still-in-use dentry
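
As a userspace illustration of the open-conflict rule that the generic
lease manager's new ->lm_open_conflict() operation enforces (a minimal
sketch; the file path and error handling are illustrative, not taken
from the patch):

	/* demo: a write lease is denied while a conflicting open exists */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd1 = open("/tmp/lease-demo", O_WRONLY | O_CREAT, 0600);
		int fd2 = open("/tmp/lease-demo", O_RDONLY); /* conflicting open */

		if (fd1 < 0 || fd2 < 0)
			return 1;

		/* the extra read open makes i_readcount nonzero, so the
		 * open-conflict check fails the request with EAGAIN */
		if (fcntl(fd1, F_SETLEASE, F_WRLCK) < 0)
			perror("F_SETLEASE with conflicting open");

		close(fd2);

		/* with the conflicting open gone, the lease is granted */
		if (fcntl(fd1, F_SETLEASE, F_WRLCK) == 0)
			puts("write lease granted");

		fcntl(fd1, F_SETLEASE, F_UNLCK);
		close(fd1);
		return 0;
	}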

* tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs:
  pidfs: protect PIDFD_GET_* ioctls() via ifdef
  ecryptfs: Release lower parent dentry after creating dir
  ecryptfs: Fix improper mknod pairing of start_creating()/end_removing()
  get rid of bogus __user in struct xattr_args::value
  VFS: fix __start_dirop() kernel-doc warnings
  fs: Describe @isnew parameter in ilookup5_nowait()
  fs: make sure to fail try_to_unlazy() and try_to_unlazy_next() for LOOKUP_CACHED
  netfs: Fix early read unlock of page with EOF in middle
  filelock: allow lease_managers to dictate what qualifies as a conflict
  filelock: add lease_dispose_list() helper
  iomap: replace folio_batch allocation with stack allocation
  media: mc: fix potential use-after-free in media_request_alloc()
Linus Torvalds, 2026-01-09 05:57:57 -10:00

16 changed files with 196 additions and 97 deletions

Documentation/filesystems/locking.rst

@@ -416,6 +416,7 @@ lm_change yes no no
 lm_breaker_owns_lease:	yes		no		no
 lm_lock_expirable	yes		no		no
 lm_expire_lock		no		no		yes
+lm_open_conflict	yes		no		no
 ======================	=============	=================	=========
 
 buffer_head

drivers/media/mc/mc-request.c

@@ -315,12 +315,12 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
 	fd_prepare_file(fdf)->private_data = req;
-	*alloc_fd = fd_publish(fdf);
 	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
-		 atomic_inc_return(&mdev->request_id), *alloc_fd);
+		 atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
 	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
 
+	*alloc_fd = fd_publish(fdf);
+
 	return 0;
 
 err_free_req:

fs/ecryptfs/inode.c

@@ -533,6 +533,7 @@ static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
 	fsstack_copy_inode_size(dir, lower_dir);
 	set_nlink(dir, lower_dir->i_nlink);
 out:
+	dput(lower_dir_dentry);
 	end_creating(lower_dentry);
 	if (d_really_is_negative(dentry))
 		d_drop(dentry);
@@ -584,7 +585,7 @@ ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
 	fsstack_copy_attr_times(dir, lower_dir);
 	fsstack_copy_inode_size(dir, lower_dir);
 out:
-	end_removing(lower_dentry);
+	end_creating(lower_dentry);
 	if (d_really_is_negative(dentry))
 		d_drop(dentry);
 	return rc;

fs/inode.c

@@ -1593,6 +1593,9 @@ EXPORT_SYMBOL(igrab);
  * @hashval:	hash value (usually inode number) to search for
  * @test:	callback used for comparisons between inodes
  * @data:	opaque data pointer to pass to @test
+ * @isnew:	return argument telling whether I_NEW was set when
+ *		the inode was found in hash (the caller needs to
+ *		wait for I_NEW to clear)
  *
  * Search for the inode specified by @hashval and @data in the inode cache.
  * If the inode is in the cache, the inode is returned with an incremented

fs/iomap/buffered-io.c

@@ -832,7 +832,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter,
 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-	if (iter->fbatch) {
+	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
 		struct folio *folio = folio_batch_next(iter->fbatch);
 
 		if (!folio)
@@ -929,7 +929,7 @@ static int iomap_write_begin(struct iomap_iter *iter,
 	 * process so return and let the caller iterate and refill the batch.
 	 */
 	if (!folio) {
-		WARN_ON_ONCE(!iter->fbatch);
+		WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
 		return 0;
 	}
@@ -1544,23 +1544,39 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
 	return status;
 }
 
-loff_t
+/**
+ * iomap_fill_dirty_folios - fill a folio batch with dirty folios
+ * @iter: Iteration structure
+ * @start: Start offset of range. Updated based on lookup progress.
+ * @end: End offset of range
+ * @iomap_flags: Flags to set on the associated iomap to track the batch.
+ *
+ * Returns the folio count directly. Also returns the associated control flag
+ * if the batch lookup is performed and the expected offset of a subsequent
+ * lookup via out params. The caller is responsible to set the flag on the
+ * associated iomap.
+ */
+unsigned int
 iomap_fill_dirty_folios(
 	struct iomap_iter *iter,
-	loff_t offset,
-	loff_t length)
+	loff_t *start,
+	loff_t end,
+	unsigned int *iomap_flags)
 {
 	struct address_space *mapping = iter->inode->i_mapping;
-	pgoff_t start = offset >> PAGE_SHIFT;
-	pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
+	pgoff_t pstart = *start >> PAGE_SHIFT;
+	pgoff_t pend = (end - 1) >> PAGE_SHIFT;
+	unsigned int count;
 
-	iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
-	if (!iter->fbatch)
-		return offset + length;
-	folio_batch_init(iter->fbatch);
+	if (!iter->fbatch) {
+		*start = end;
+		return 0;
+	}
 
-	filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
-	return (start << PAGE_SHIFT);
+	count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
+	*start = (pstart << PAGE_SHIFT);
+	*iomap_flags |= IOMAP_F_FOLIO_BATCH;
+	return count;
 }
 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
@@ -1569,17 +1585,21 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 		const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops, void *private)
 {
+	struct folio_batch fbatch;
 	struct iomap_iter iter = {
 		.inode		= inode,
 		.pos		= pos,
 		.len		= len,
 		.flags		= IOMAP_ZERO,
 		.private	= private,
+		.fbatch		= &fbatch,
 	};
 	struct address_space *mapping = inode->i_mapping;
 	int ret;
 	bool range_dirty;
 
+	folio_batch_init(&fbatch);
+
 	/*
 	 * To avoid an unconditional flush, check pagecache state and only flush
 	 * if dirty and the fs returns a mapping that might convert on
@@ -1590,11 +1610,11 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 	while ((ret = iomap_iter(&iter, ops)) > 0) {
 		const struct iomap *srcmap = iomap_iter_srcmap(&iter);
 
-		if (WARN_ON_ONCE(iter.fbatch &&
+		if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
 				 srcmap->type != IOMAP_UNWRITTEN))
 			return -EIO;
 
-		if (!iter.fbatch &&
+		if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
 		    (srcmap->type == IOMAP_HOLE ||
 		     srcmap->type == IOMAP_UNWRITTEN)) {
 			s64 status;

fs/iomap/iter.c

@@ -8,10 +8,10 @@
 
 static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
 {
-	if (iter->fbatch) {
+	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
 		folio_batch_release(iter->fbatch);
-		kfree(iter->fbatch);
-		iter->fbatch = NULL;
+		folio_batch_reinit(iter->fbatch);
+		iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
 	}
 	iter->status = 0;

fs/locks.c

@@ -369,10 +369,19 @@ locks_dispose_list(struct list_head *dispose)
 	while (!list_empty(dispose)) {
 		flc = list_first_entry(dispose, struct file_lock_core, flc_list);
 		list_del_init(&flc->flc_list);
-		if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
-			locks_free_lease(file_lease(flc));
-		else
-			locks_free_lock(file_lock(flc));
+		locks_free_lock(file_lock(flc));
+	}
+}
+
+static void
+lease_dispose_list(struct list_head *dispose)
+{
+	struct file_lock_core *flc;
+
+	while (!list_empty(dispose)) {
+		flc = list_first_entry(dispose, struct file_lock_core, flc_list);
+		list_del_init(&flc->flc_list);
+		locks_free_lease(file_lease(flc));
 	}
 }
@@ -576,10 +585,50 @@ lease_setup(struct file_lease *fl, void **priv)
 	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 }
 
+/**
+ * lease_open_conflict - see if the given file points to an inode that has
+ *                       an existing open that would conflict with the
+ *                       desired lease.
+ * @filp:	file to check
+ * @arg:	type of lease that we're trying to acquire
+ *
+ * Check to see if there's an existing open fd on this file that would
+ * conflict with the lease we're trying to set.
+ */
+static int
+lease_open_conflict(struct file *filp, const int arg)
+{
+	struct inode *inode = file_inode(filp);
+	int self_wcount = 0, self_rcount = 0;
+
+	if (arg == F_RDLCK)
+		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
+	else if (arg != F_WRLCK)
+		return 0;
+
+	/*
+	 * Make sure that only read/write count is from lease requestor.
+	 * Note that this will result in denying write leases when i_writecount
+	 * is negative, which is what we want. (We shouldn't grant write leases
+	 * on files open for execution.)
+	 */
+	if (filp->f_mode & FMODE_WRITE)
+		self_wcount = 1;
+	else if (filp->f_mode & FMODE_READ)
+		self_rcount = 1;
+
+	if (atomic_read(&inode->i_writecount) != self_wcount ||
+	    atomic_read(&inode->i_readcount) != self_rcount)
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct lease_manager_operations lease_manager_ops = {
 	.lm_break = lease_break_callback,
 	.lm_change = lease_modify,
 	.lm_setup = lease_setup,
+	.lm_open_conflict = lease_open_conflict,
 };
 
 /*
@@ -1620,7 +1669,7 @@ restart:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
 					list_empty(&new_fl->c.flc_blocked_member),
 					break_time);
@@ -1643,7 +1692,7 @@ restart:
 out:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 free_lock:
 	locks_free_lease(new_fl);
 	return error;
@@ -1727,7 +1776,7 @@ static int __fcntl_getlease(struct file *filp, unsigned int flavor)
 		spin_unlock(&ctx->flc_lock);
 		percpu_up_read(&file_rwsem);
 
-		locks_dispose_list(&dispose);
+		lease_dispose_list(&dispose);
 	}
 	return type;
 }
@@ -1745,52 +1794,6 @@ int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
 	return 0;
 }
 
-/**
- * check_conflicting_open - see if the given file points to an inode that has
- *                          an existing open that would conflict with the
- *                          desired lease.
- * @filp:	file to check
- * @arg:	type of lease that we're trying to acquire
- * @flags:	current lock flags
- *
- * Check to see if there's an existing open fd on this file that would
- * conflict with the lease we're trying to set.
- */
-static int
-check_conflicting_open(struct file *filp, const int arg, int flags)
-{
-	struct inode *inode = file_inode(filp);
-	int self_wcount = 0, self_rcount = 0;
-
-	if (flags & FL_LAYOUT)
-		return 0;
-	if (flags & FL_DELEG)
-		/* We leave these checks to the caller */
-		return 0;
-
-	if (arg == F_RDLCK)
-		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
-	else if (arg != F_WRLCK)
-		return 0;
-
-	/*
-	 * Make sure that only read/write count is from lease requestor.
-	 * Note that this will result in denying write leases when i_writecount
-	 * is negative, which is what we want. (We shouldn't grant write leases
-	 * on files open for execution.)
-	 */
-	if (filp->f_mode & FMODE_WRITE)
-		self_wcount = 1;
-	else if (filp->f_mode & FMODE_READ)
-		self_rcount = 1;
-
-	if (atomic_read(&inode->i_writecount) != self_wcount ||
-	    atomic_read(&inode->i_readcount) != self_rcount)
-		return -EAGAIN;
-
-	return 0;
-}
-
 static int
 generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
 {
@@ -1827,7 +1830,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
 	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
-	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
+	error = lease->fl_lmops->lm_open_conflict(filp, arg);
 	if (error)
 		goto out;
@@ -1884,7 +1887,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
 	 * precedes these checks.
 	 */
 	smp_mb();
-	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
+	error = lease->fl_lmops->lm_open_conflict(filp, arg);
 	if (error) {
 		locks_unlink_lock_ctx(&lease->c);
 		goto out;
@@ -1896,7 +1899,7 @@ out_setup:
 out:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
 	if (!error && !my_fl)
@@ -1932,7 +1935,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 	error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	return error;
 }
@@ -2735,7 +2738,7 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 }
 
 /*

View File

@@ -830,11 +830,9 @@ static inline bool legitimize_path(struct nameidata *nd,
static bool legitimize_links(struct nameidata *nd)
{
int i;
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
return false;
}
VFS_BUG_ON(nd->flags & LOOKUP_CACHED);
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
@@ -883,6 +881,11 @@ static bool try_to_unlazy(struct nameidata *nd)
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out1;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
@@ -918,6 +921,11 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out2;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out2;
res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
@@ -2836,10 +2844,11 @@ static int filename_parentat(int dfd, struct filename *name,
}
/**
* start_dirop - begin a create or remove dirop, performing locking and lookup
* __start_dirop - begin a create or remove dirop, performing locking and lookup
* @parent: the dentry of the parent in which the operation will occur
* @name: a qstr holding the name within that parent
* @lookup_flags: intent and other lookup flags.
* @state: task state bitmask
*
* The lookup is performed and necessary locks are taken so that, on success,
* the returned dentry can be operated on safely.
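
LOOKUP_CACHED is the in-kernel side of openat2(2)'s RESOLVE_CACHED: the
lookup must be satisfied entirely from the dcache or fail with EAGAIN
so the caller can retry on the slow path. A minimal userspace sketch
(illustrative; openat2() has no glibc wrapper, hence raw syscall(2)):

	#define _GNU_SOURCE
	#include <errno.h>
	#include <fcntl.h>
	#include <linux/openat2.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct open_how how = {
			.flags   = O_RDONLY,
			.resolve = RESOLVE_CACHED, /* maps to LOOKUP_CACHED */
		};
		int fd = syscall(SYS_openat2, AT_FDCWD, "/etc/hostname",
				 &how, sizeof(how));

		if (fd < 0 && errno == EAGAIN)
			puts("not in the dcache: retry without RESOLVE_CACHED");
		else if (fd >= 0)
			close(fd);
		return 0;
	}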

fs/netfs/read_collect.c

@@ -137,7 +137,7 @@ static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
 		rreq->front_folio_order = order;
 		fsize = PAGE_SIZE << order;
 		fpos  = folio_pos(folio);
-		fend  = umin(fpos + fsize, rreq->i_size);
+		fend  = fpos + fsize;
 
 		trace_netfs_collect_folio(rreq, folio, fend, collected_to);

fs/nfsd/nfs4layouts.c

@@ -764,9 +764,28 @@ nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
 	return lease_modify(onlist, arg, dispose);
 }
 
+/**
+ * nfsd4_layout_lm_open_conflict - see if the given file points to an inode
+ *                                 that has an existing open that would
+ *                                 conflict with the desired lease.
+ * @filp:	file to check
+ * @arg:	type of lease that we're trying to acquire
+ *
+ * The kernel will call into this operation to determine whether there
+ * are conflicting opens that may prevent the layout from being granted.
+ * For nfsd, that check is done at a higher level, so this trivially
+ * returns 0.
+ */
+static int
+nfsd4_layout_lm_open_conflict(struct file *filp, int arg)
+{
+	return 0;
+}
+
 static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
-	.lm_break	= nfsd4_layout_lm_break,
-	.lm_change	= nfsd4_layout_lm_change,
+	.lm_break		= nfsd4_layout_lm_break,
+	.lm_change		= nfsd4_layout_lm_change,
+	.lm_open_conflict	= nfsd4_layout_lm_open_conflict,
 };
 
 int
int

View File

@@ -5555,10 +5555,29 @@ nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
return -EAGAIN;
}
/**
* nfsd4_deleg_lm_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* The kernel will call into this operation to determine whether there
* are conflicting opens that may prevent the deleg from being granted.
* For nfsd, that check is done at a higher level, so this trivially
* returns 0.
*/
static int
nfsd4_deleg_lm_open_conflict(struct file *filp, int arg)
{
return 0;
}
static const struct lease_manager_operations nfsd_lease_mng_ops = {
.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
.lm_open_conflict = nfsd4_deleg_lm_open_conflict,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)

fs/pidfs.c

@@ -517,14 +517,18 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	/* Namespaces that hang off nsproxy. */
 	case PIDFD_GET_CGROUP_NAMESPACE:
+#ifdef CONFIG_CGROUPS
 		if (!ns_ref_get(nsp->cgroup_ns))
 			break;
 		ns_common = to_ns_common(nsp->cgroup_ns);
+#endif
 		break;
 	case PIDFD_GET_IPC_NAMESPACE:
+#ifdef CONFIG_IPC_NS
 		if (!ns_ref_get(nsp->ipc_ns))
 			break;
 		ns_common = to_ns_common(nsp->ipc_ns);
+#endif
 		break;
 	case PIDFD_GET_MNT_NAMESPACE:
 		if (!ns_ref_get(nsp->mnt_ns))
@@ -532,32 +536,43 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		ns_common = to_ns_common(nsp->mnt_ns);
 		break;
 	case PIDFD_GET_NET_NAMESPACE:
+#ifdef CONFIG_NET_NS
 		if (!ns_ref_get(nsp->net_ns))
 			break;
 		ns_common = to_ns_common(nsp->net_ns);
+#endif
 		break;
 	case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
+#ifdef CONFIG_PID_NS
 		if (!ns_ref_get(nsp->pid_ns_for_children))
 			break;
 		ns_common = to_ns_common(nsp->pid_ns_for_children);
+#endif
 		break;
 	case PIDFD_GET_TIME_NAMESPACE:
+#ifdef CONFIG_TIME_NS
 		if (!ns_ref_get(nsp->time_ns))
 			break;
 		ns_common = to_ns_common(nsp->time_ns);
+#endif
 		break;
 	case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
+#ifdef CONFIG_TIME_NS
 		if (!ns_ref_get(nsp->time_ns_for_children))
 			break;
 		ns_common = to_ns_common(nsp->time_ns_for_children);
+#endif
 		break;
 	case PIDFD_GET_UTS_NAMESPACE:
+#ifdef CONFIG_UTS_NS
 		if (!ns_ref_get(nsp->uts_ns))
 			break;
 		ns_common = to_ns_common(nsp->uts_ns);
+#endif
 		break;
 	/* Namespaces that don't hang off nsproxy. */
 	case PIDFD_GET_USER_NAMESPACE:
+#ifdef CONFIG_USER_NS
 		scoped_guard(rcu) {
 			struct user_namespace *user_ns;
@@ -566,8 +581,10 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				break;
 			ns_common = to_ns_common(user_ns);
 		}
+#endif
 		break;
 	case PIDFD_GET_PID_NAMESPACE:
+#ifdef CONFIG_PID_NS
 		scoped_guard(rcu) {
 			struct pid_namespace *pid_ns;
@@ -576,6 +593,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				break;
 			ns_common = to_ns_common(pid_ns);
 		}
+#endif
 		break;
 	default:
 		return -ENOIOCTLCMD;
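
A minimal userspace sketch of the guarded path (illustrative; the exact
errno for a disabled namespace type depends on the kernel, the point is
that the ioctl now fails cleanly instead of handing out an fd whose
inode eviction crashes):

	#define _GNU_SOURCE
	#include <linux/pidfd.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int pidfd = syscall(SYS_pidfd_open, getpid(), 0);
		int nsfd  = ioctl(pidfd, PIDFD_GET_TIME_NAMESPACE, 0);

		if (nsfd < 0)
			perror("PIDFD_GET_TIME_NAMESPACE"); /* e.g. CONFIG_TIME_NS=n */
		else
			close(nsfd); /* pre-fix, with the config disabled,
				      * this close could crash the kernel */
		close(pidfd);
		return 0;
	}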

fs/xfs/xfs_iomap.c

@@ -1831,7 +1831,6 @@ xfs_buffered_write_iomap_begin(
 	 */
 	if (flags & IOMAP_ZERO) {
 		xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
-		u64 end;
 
 		if (isnullstartblock(imap.br_startblock) &&
 		    offset_fsb >= eof_fsb)
@@ -1851,12 +1850,14 @@ xfs_buffered_write_iomap_begin(
 		 */
 		if (imap.br_state == XFS_EXT_UNWRITTEN &&
 		    offset_fsb < eof_fsb) {
-			loff_t len = min(count,
-					 XFS_FSB_TO_B(mp, imap.br_blockcount));
+			loff_t foffset = offset, fend;
 
-			end = iomap_fill_dirty_folios(iter, offset, len);
+			fend = offset +
+				min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
+			iomap_fill_dirty_folios(iter, &foffset, fend,
+						&iomap_flags);
 			end_fsb = min_t(xfs_fileoff_t, end_fsb,
-					XFS_B_TO_FSB(mp, end));
+					XFS_B_TO_FSB(mp, foffset));
 		}
 
 		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

include/linux/filelock.h

@@ -49,6 +49,7 @@ struct lease_manager_operations {
 	int (*lm_change)(struct file_lease *, int, struct list_head *);
 	void (*lm_setup)(struct file_lease *, void **);
 	bool (*lm_breaker_owns_lease)(struct file_lease *);
+	int (*lm_open_conflict)(struct file *, int);
 };
 
 struct lock_manager {
struct lock_manager {

include/linux/iomap.h

@@ -88,6 +88,9 @@ struct vm_fault;
 /*
  * Flags set by the core iomap code during operations:
  *
+ * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
+ * for this operation, set by iomap_fill_dirty_folios().
+ *
  * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
  * has changed as the result of this write operation.
  *
@@ -95,6 +98,7 @@ struct vm_fault;
  * range it covers needs to be remapped by the high level before the operation
  * can proceed.
  */
+#define IOMAP_F_FOLIO_BATCH	(1U << 13)
 #define IOMAP_F_SIZE_CHANGED	(1U << 14)
 #define IOMAP_F_STALE		(1U << 15)
@@ -352,8 +356,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops);
-loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
-		loff_t length);
+unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
+		loff_t end, unsigned int *iomap_flags);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
 		bool *did_zero, const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops, void *private);

include/uapi/linux/xattr.h

@@ -23,7 +23,7 @@
 #define XATTR_REPLACE	0x2	/* set value, fail if attr does not exist */
 
 struct xattr_args {
-	__aligned_u64 __user value;
+	__aligned_u64 value;
 	__u32 size;
 	__u32 flags;
 };
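
The __user annotation belongs on pointer types; xattr_args::value is a
plain 64-bit integer that userspace fills with a pointer value and the
kernel converts explicitly. A minimal sketch of the kernel-side pattern
(the helper function is hypothetical; u64_to_user_ptr() is the real
conversion macro):

	#include <linux/kernel.h>
	#include <linux/uaccess.h>

	/* hypothetical consumer of struct xattr_args */
	static int copy_xattr_value(const struct xattr_args *args, void *kbuf)
	{
		/* the u64 only becomes a __user pointer at this conversion */
		const void __user *uvalue = u64_to_user_ptr(args->value);

		if (copy_from_user(kbuf, uvalue, args->size))
			return -EFAULT;
		return 0;
	}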