@@ -92,6 +92,11 @@ static inline bool IS_LEASE(struct file_lock_core *flc)
 #define IS_REMOTELCK(fl) (fl->fl_core.fl_pid <= 0)
 
+struct file_lock *file_lock(struct file_lock_core *flc)
+{
+	return container_of(flc, struct file_lock, fl_core);
+}
+
 static bool lease_breaking(struct file_lock *fl)
 {
 	return fl->fl_core.fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
 }
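For reference, the new file_lock() wrapper is the usual container_of() idiom: given a pointer to an embedded member, recover the structure that embeds it. Below is a minimal userspace sketch of that idiom; the offsetof()-based container_of() stand-in and the toy struct fields (fl_flags, owner) are illustrative assumptions, not the kernel definitions.

	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative userspace stand-in for the kernel's container_of(). */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Toy stand-ins for the two structs; only the embedding matters. */
	struct file_lock_core { int fl_flags; };
	struct file_lock {
		struct file_lock_core fl_core;	/* embedded core, as in the patch */
		const char *owner;		/* hypothetical extra field */
	};

	/* Same shape as the wrapper added above. */
	static struct file_lock *file_lock(struct file_lock_core *flc)
	{
		return container_of(flc, struct file_lock, fl_core);
	}

	int main(void)
	{
		struct file_lock fl = { .fl_core = { .fl_flags = 1 }, .owner = "demo" };
		struct file_lock_core *flc = &fl.fl_core;	/* what callers now pass */

		/* Recover the containing file_lock from its embedded core. */
		printf("%s\n", file_lock(flc)->owner);	/* prints "demo" */
		return 0;
	}

If fl_core happens to be the first member, the pointer adjustment is zero at runtime, but container_of() keeps the wrapper correct no matter where the member sits in the layout.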
@@ -677,31 +682,35 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter)
  *
  * Must be called with blocked_lock_lock held.
  */
-static void __locks_delete_block(struct file_lock *waiter)
+static void __locks_delete_block(struct file_lock_core *waiter)
 {
-	locks_delete_global_blocked(&waiter->fl_core);
-	list_del_init(&waiter->fl_core.fl_blocked_member);
+	locks_delete_global_blocked(waiter);
+	list_del_init(&waiter->fl_blocked_member);
 }
 
-static void __locks_wake_up_blocks(struct file_lock *blocker)
+static void __locks_wake_up_blocks(struct file_lock_core *blocker)
 {
-	while (!list_empty(&blocker->fl_core.fl_blocked_requests)) {
-		struct file_lock *waiter;
+	while (!list_empty(&blocker->fl_blocked_requests)) {
+		struct file_lock_core *waiter;
+		struct file_lock *fl;
+
+		waiter = list_first_entry(&blocker->fl_blocked_requests,
+					  struct file_lock_core, fl_blocked_member);
 
-		waiter = list_first_entry(&blocker->fl_core.fl_blocked_requests,
-					  struct file_lock, fl_core.fl_blocked_member);
+		fl = file_lock(waiter);
 		__locks_delete_block(waiter);
-		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
-			waiter->fl_lmops->lm_notify(waiter);
+		if ((IS_POSIX(waiter) || IS_FLOCK(waiter)) &&
+		    fl->fl_lmops && fl->fl_lmops->lm_notify)
+			fl->fl_lmops->lm_notify(fl);
 		else
-			wake_up(&waiter->fl_core.fl_wait);
+			wake_up(&waiter->fl_wait);
 
 		/*
 		 * The setting of fl_blocker to NULL marks the "done"
 		 * point in deleting a block. Paired with acquire at the top
 		 * of locks_delete_block().
 		 */
-		smp_store_release(&waiter->fl_core.fl_blocker, NULL);
+		smp_store_release(&waiter->fl_blocker, NULL);
 	}
 }
 
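The loop keeps the familiar drain shape: pop the first entry off fl_blocked_requests, unlink it with list_del_init(), wake it, repeat until the list is empty; the only change is that entries are now file_lock_core, and the lm_notify path first recovers the file_lock via the new wrapper. Here is a standalone sketch of that drain shape; the list_head helpers are minimal userspace reimplementations, and the waiter struct is a hypothetical stand-in.

	#include <stddef.h>
	#include <stdio.h>

	/* Minimal userspace stand-ins for the kernel list helpers. */
	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_first_entry(head, type, member) \
		container_of((head)->next, type, member)

	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_del_init(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->next = e->prev = e;
	}

	static void list_add_tail(struct list_head *e, struct list_head *h)
	{
		e->prev = h->prev;
		e->next = h;
		h->prev->next = e;
		h->prev = e;
	}

	/* Hypothetical waiter: "member" plays the role of fl_blocked_member,
	 * the list head below the role of fl_blocked_requests. */
	struct waiter { int id; struct list_head member; };

	int main(void)
	{
		struct list_head blocked = LIST_HEAD_INIT(blocked);
		struct waiter a = { .id = 1 }, b = { .id = 2 };

		list_add_tail(&a.member, &blocked);
		list_add_tail(&b.member, &blocked);

		/* Same drain shape as __locks_wake_up_blocks(): take the first
		 * waiter, unlink it, then "wake" it (here: just print). */
		while (!list_empty(&blocked)) {
			struct waiter *w = list_first_entry(&blocked,
							    struct waiter, member);
			list_del_init(&w->member);
			printf("waking waiter %d\n", w->id);
		}
		return 0;
	}

Using list_del_init() rather than list_del() leaves each unlinked entry self-linked, so a later list_empty() check on it is safe.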
@@ -743,8 +752,8 @@ int locks_delete_block(struct file_lock *waiter)
 	spin_lock(&blocked_lock_lock);
 	if (waiter->fl_core.fl_blocker)
 		status = 0;
-	__locks_wake_up_blocks(waiter);
-	__locks_delete_block(waiter);
+	__locks_wake_up_blocks(&waiter->fl_core);
+	__locks_delete_block(&waiter->fl_core);
 
 	/*
 	 * The setting of fl_blocker to NULL marks the "done" point in deleting
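The comment spells out the ordering contract: the waker's smp_store_release() of fl_blocker pairs with an acquire load in locks_delete_block(), so every list manipulation done before the release store is visible to a waiter that observes fl_blocker == NULL. A rough C11 userspace analogue of that pairing follows; atomic_store_explicit()/atomic_load_explicit() stand in for the kernel primitives, and the struct and function names are invented for illustration.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Hypothetical waiter; "blocker" plays the role of fl_blocker. */
	struct waiter { _Atomic(void *) blocker; };

	/* Waker side: publish "done" last, after all list manipulation,
	 * mirroring smp_store_release(&waiter->fl_blocker, NULL). */
	static void waker_mark_done(struct waiter *w)
	{
		atomic_store_explicit(&w->blocker, NULL, memory_order_release);
	}

	/* Waiter side: the acquire load mirrors the check at the top of
	 * locks_delete_block(); if it reads NULL, everything the waker did
	 * before the release store is guaranteed visible here too. */
	static bool waiter_sees_done(struct waiter *w)
	{
		return atomic_load_explicit(&w->blocker, memory_order_acquire) == NULL;
	}

	int main(void)
	{
		struct waiter w = { .blocker = (void *)&w };	/* initially blocked */
		waker_mark_done(&w);
		return waiter_sees_done(&w) ? 0 : 1;
	}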
@@ -799,7 +808,7 @@ static void __locks_insert_block(struct file_lock *blocker,
 	 * waiter, but might not conflict with blocker, or the requests
 	 * and lock which block it. So they all need to be woken.
 	 */
-	__locks_wake_up_blocks(waiter);
+	__locks_wake_up_blocks(&waiter->fl_core);
 }
 
 /* Must be called with flc_lock held. */
@@ -831,7 +840,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
 	if (list_empty(&blocker->fl_core.fl_blocked_requests))
 		return;
 
 	spin_lock(&blocked_lock_lock);
-	__locks_wake_up_blocks(blocker);
+	__locks_wake_up_blocks(&blocker->fl_core);
 	spin_unlock(&blocked_lock_lock);
 }
@@ -1186,7 +1195,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	 * Ensure that we don't find any locks blocked on this
 	 * request during deadlock detection.
 	 */
-	__locks_wake_up_blocks(&request->fl_core);
+	__locks_wake_up_blocks(&request->fl_core);
 	if (likely(!posix_locks_deadlock(request, fl))) {
 		error = FILE_LOCK_DEFERRED;
 		__locks_insert_block(fl, request,
Convert __locks_delete_block() and __locks_wake_up_blocks() to take a
struct file_lock_core pointer. Note that to accommodate this, we need to
add a new file_lock() wrapper to go from a file_lock_core pointer back to
its containing file_lock.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/locks.c | 43 ++++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 17 deletions(-)