From 14359e854fec7fcab8e29bdd1377eda88c0b8e6b Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Thu, 5 May 2022 20:28:06 +0200 Subject: [PATCH 15/17] futex-winesync Signed-off-by: Peter Jung --- Documentation/admin-guide/devices.txt | 3 +- Documentation/userspace-api/index.rst | 1 + .../userspace-api/ioctl/ioctl-number.rst.rej | 11 + Documentation/userspace-api/winesync.rst | 444 +++++ MAINTAINERS | 9 + drivers/misc/Kconfig | 11 + drivers/misc/Makefile | 1 + drivers/misc/winesync.c | 1212 ++++++++++++++ include/linux/miscdevice.h | 1 + include/uapi/linux/futex.h | 13 + include/uapi/linux/winesync.h | 71 + kernel/futex/syscalls.c | 75 +- tools/testing/selftests/Makefile | 1 + .../selftests/drivers/winesync/Makefile | 8 + .../testing/selftests/drivers/winesync/config | 1 + .../selftests/drivers/winesync/winesync.c | 1479 +++++++++++++++++ 16 files changed, 3339 insertions(+), 2 deletions(-) create mode 100644 Documentation/userspace-api/ioctl/ioctl-number.rst.rej create mode 100644 Documentation/userspace-api/winesync.rst create mode 100644 drivers/misc/winesync.c create mode 100644 include/uapi/linux/winesync.h create mode 100644 tools/testing/selftests/drivers/winesync/Makefile create mode 100644 tools/testing/selftests/drivers/winesync/config create mode 100644 tools/testing/selftests/drivers/winesync/winesync.c diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index c07dc0ee860e..4e5abe508426 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -376,8 +376,9 @@ 240 = /dev/userio Serio driver testing device 241 = /dev/vhost-vsock Host kernel driver for virtio vsock 242 = /dev/rfkill Turning off radio transmissions (rfkill) + 243 = /dev/winesync Wine synchronization primitive device - 243-254 Reserved for local use + 244-254 Reserved for local use 255 Reserved for MISC_DYNAMIC_MINOR 11 char Raw keyboard device (Linux/SPARC only) diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index a61eac0c73f8..0bf697ddcb09 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -29,6 +29,7 @@ place where this information is gathered. sysfs-platform_profile vduse futex2 + winesync .. only:: subproject and html diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst.rej b/Documentation/userspace-api/ioctl/ioctl-number.rst.rej new file mode 100644 index 000000000000..3b80cf1e7b53 --- /dev/null +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst.rej @@ -0,0 +1,11 @@ +--- Documentation/userspace-api/ioctl/ioctl-number.rst ++++ Documentation/userspace-api/ioctl/ioctl-number.rst +@@ -371,6 +371,8 @@ Code Seq# Include File Comments + + 0xF6 all LTTng Linux Trace Toolkit Next Generation + ++0xF7 00-0F uapi/linux/winesync.h Wine synchronization primitives ++ + 0xF8 all arch/x86/include/uapi/asm/amd_hsmp.h AMD HSMP EPYC system management interface driver + + 0xFD all linux/dm-ioctl.h diff --git a/Documentation/userspace-api/winesync.rst b/Documentation/userspace-api/winesync.rst new file mode 100644 index 000000000000..f0110d2744c7 --- /dev/null +++ b/Documentation/userspace-api/winesync.rst @@ -0,0 +1,444 @@ +===================================== +Wine synchronization primitive driver +===================================== + +This page documents the user-space API for the winesync driver. + +winesync is a support driver for emulation of NT synchronization +primitives by the Wine project or other NT emulators. 
It exists +because implementation in user-space, using existing tools, cannot +simultaneously satisfy performance, correctness, and security +constraints. It is implemented entirely in software, and does not +drive any hardware device. + +This interface is meant as a compatibility tool only, and should not +be used for general synchronization. Instead use generic, versatile +interfaces such as futex(2) and poll(2). + +Synchronization primitives +========================== + +The winesync driver exposes three types of synchronization primitives: +semaphores, mutexes, and events. + +A semaphore holds a single volatile 32-bit counter, and a static +32-bit integer denoting the maximum value. It is considered signaled +when the counter is nonzero. The counter is decremented by one when a +wait is satisfied. Both the initial and maximum count are established +when the semaphore is created. + +A mutex holds a volatile 32-bit recursion count, and a volatile 32-bit +identifier denoting its owner. A mutex is considered signaled when its +owner is zero (indicating that it is not owned). The recursion count +is incremented when a wait is satisfied, and ownership is set to the +given identifier. + +A mutex also holds an internal flag denoting whether its previous +owner has died; such a mutex is said to be inconsistent. Owner death +is not tracked automatically based on thread death, but rather must be +communicated using ``WINESYNC_IOC_KILL_OWNER``. An inconsistent mutex +is inherently considered unowned. + +Except for the "unowned" semantics of zero, the actual value of the +owner identifier is not interpreted by the winesync driver at all. The +intended use is to store a thread identifier; however, the winesync +driver does not actually validate that a calling thread provides +consistent or unique identifiers. + +An event holds a volatile boolean state denoting whether it is +signaled or not. There are two types of events, auto-reset and +manual-reset. An auto-reset event is designaled when a wait is +satisfied; a manual-reset event is not. The event type is specified +when the event is created. + +Unless specified otherwise, all operations on an object are atomic and +totally ordered with respect to other operations on the same object. + +Objects are represented by unsigned 32-bit integers. + +Char device +=========== + +The winesync driver creates a single char device /dev/winesync. Each +file description opened on the device represents a unique namespace. +That is, objects created on one open file description are shared +across all its individual descriptors, but are not shared with other +open() calls on the same device. The same file description may be +shared across multiple processes. + +ioctl reference +=============== + +All operations on the device are done through ioctls. There are three +structures used in ioctl calls:: + + struct winesync_sem_args { + __u32 sem; + __u32 count; + __u32 max; + }; + + struct winesync_mutex_args { + __u32 mutex; + __u32 owner; + __u32 count; + }; + + struct winesync_event_args { + __u32 event; + __u32 signaled; + __u32 manual; + }; + + struct winesync_wait_args { + __u64 timeout; + __u64 objs; + __u32 count; + __u32 owner; + __u32 index; + __u32 pad; + }; + +Depending on the ioctl, members of the structure may be used as input, +output, or not at all. All ioctls return 0 on success. + +The ioctls are as follows: + +.. c:macro:: WINESYNC_IOC_CREATE_SEM + + Create a semaphore object. 
Takes a pointer to struct + :c:type:`winesync_sem_args`, which is used as follows: + + .. list-table:: + + * - ``sem`` + - On output, contains the identifier of the created semaphore. + * - ``count`` + - Initial count of the semaphore. + * - ``max`` + - Maximum count of the semaphore. + + Fails with ``EINVAL`` if ``count`` is greater than ``max``. + +.. c:macro:: WINESYNC_IOC_CREATE_MUTEX + + Create a mutex object. Takes a pointer to struct + :c:type:`winesync_mutex_args`, which is used as follows: + + .. list-table:: + + * - ``mutex`` + - On output, contains the identifier of the created mutex. + * - ``count`` + - Initial recursion count of the mutex. + * - ``owner`` + - Initial owner of the mutex. + + If ``owner`` is nonzero and ``count`` is zero, or if ``owner`` is + zero and ``count`` is nonzero, the function fails with ``EINVAL``. + +.. c:macro:: WINESYNC_IOC_CREATE_EVENT + + Create an event object. Takes a pointer to struct + :c:type:`winesync_event_args`, which is used as follows: + + .. list-table:: + + * - ``event`` + - On output, contains the identifier of the created event. + * - ``signaled`` + - If nonzero, the event is initially signaled, otherwise + nonsignaled. + * - ``manual`` + - If nonzero, the event is a manual-reset event, otherwise + auto-reset. + +.. c:macro:: WINESYNC_IOC_DELETE + + Delete an object of any type. Takes an input-only pointer to a + 32-bit integer denoting the object to delete. + + Wait ioctls currently in progress are not interrupted, and behave as + if the object remains valid. + +.. c:macro:: WINESYNC_IOC_PUT_SEM + + Post to a semaphore object. Takes a pointer to struct + :c:type:`winesync_sem_args`, which is used as follows: + + .. list-table:: + + * - ``sem`` + - Semaphore object to post to. + * - ``count`` + - Count to add to the semaphore. On output, contains the + previous count of the semaphore. + * - ``max`` + - Not used. + + If adding ``count`` to the semaphore's current count would raise the + latter past the semaphore's maximum count, the ioctl fails with + ``EOVERFLOW`` and the semaphore is not affected. If raising the + semaphore's count causes it to become signaled, eligible threads + waiting on this semaphore will be woken and the semaphore's count + decremented appropriately. + +.. c:macro:: WINESYNC_IOC_PUT_MUTEX + + Release a mutex object. Takes a pointer to struct + :c:type:`winesync_mutex_args`, which is used as follows: + + .. list-table:: + + * - ``mutex`` + - Mutex object to release. + * - ``owner`` + - Mutex owner identifier. + * - ``count`` + - On output, contains the previous recursion count. + + If ``owner`` is zero, the ioctl fails with ``EINVAL``. If ``owner`` + is not the current owner of the mutex, the ioctl fails with + ``EPERM``. + + The mutex's count will be decremented by one. If decrementing the + mutex's count causes it to become zero, the mutex is marked as + unowned and signaled, and eligible threads waiting on it will be + woken as appropriate. + +.. c:macro:: WINESYNC_IOC_SET_EVENT + + Signal an event object. Takes a pointer to struct + :c:type:`winesync_event_args`, which is used as follows: + + .. list-table:: + + * - ``event`` + - Event object to set. + * - ``signaled`` + - On output, contains the previous state of the event. + * - ``manual`` + - Unused. + + Eligible threads will be woken, and auto-reset events will be + designaled appropriately. + +.. c:macro:: WINESYNC_IOC_RESET_EVENT + + Designal an event object. Takes a pointer to struct + :c:type:`winesync_event_args`, which is used as follows: + + .. 
list-table:: + + * - ``event`` + - Event object to reset. + * - ``signaled`` + - On output, contains the previous state of the event. + * - ``manual`` + - Unused. + +.. c:macro:: WINESYNC_IOC_PULSE_EVENT + + Wake threads waiting on an event object without leaving it in a + signaled state. Takes a pointer to struct + :c:type:`winesync_event_args`, which is used as follows: + + .. list-table:: + + * - ``event`` + - Event object to pulse. + * - ``signaled`` + - On output, contains the previous state of the event. + * - ``manual`` + - Unused. + + A pulse operation can be thought of as a set followed by a reset, + performed as a single atomic operation. If two threads are waiting + on an auto-reset event which is pulsed, only one will be woken. If + two threads are waiting a manual-reset event which is pulsed, both + will be woken. However, in both cases, the event will be unsignaled + afterwards, and a simultaneous read operation will always report the + event as unsignaled. + +.. c:macro:: WINESYNC_IOC_READ_SEM + + Read the current state of a semaphore object. Takes a pointer to + struct :c:type:`winesync_sem_args`, which is used as follows: + + .. list-table:: + + * - ``sem`` + - Semaphore object to read. + * - ``count`` + - On output, contains the current count of the semaphore. + * - ``max`` + - On output, contains the maximum count of the semaphore. + +.. c:macro:: WINESYNC_IOC_READ_MUTEX + + Read the current state of a mutex object. Takes a pointer to struct + :c:type:`winesync_mutex_args`, which is used as follows: + + .. list-table:: + + * - ``mutex`` + - Mutex object to read. + * - ``owner`` + - On output, contains the current owner of the mutex, or zero + if the mutex is not currently owned. + * - ``count`` + - On output, contains the current recursion count of the mutex. + + If the mutex is marked as inconsistent, the function fails with + ``EOWNERDEAD``. In this case, ``count`` and ``owner`` are set to + zero. + +.. c:macro:: WINESYNC_IOC_READ_EVENT + + Read the current state of an event object. Takes a pointer to struct + :c:type:`winesync_event_args`, which is used as follows: + + .. list-table:: + + * - ``event`` + - Event object. + * - ``signaled`` + - On output, contains the current state of the event. + * - ``manual`` + - On output, contains 1 if the event is a manual-reset event, + and 0 otherwise. + +.. c:macro:: WINESYNC_IOC_KILL_OWNER + + Mark any mutexes owned by the given owner as unowned and + inconsistent. Takes an input-only pointer to a 32-bit integer + denoting the owner. If the owner is zero, the ioctl fails with + ``EINVAL``. + + For each mutex currently owned by the given owner, eligible threads + waiting on said mutex will be woken as appropriate (and such waits + will fail with ``EOWNERDEAD``, as described below). + + The operation as a whole is not atomic; however, the modification of + each mutex is atomic and totally ordered with respect to other + operations on the same mutex. + +.. c:macro:: WINESYNC_IOC_WAIT_ANY + + Poll on any of a list of objects, atomically acquiring at most one. + Takes a pointer to struct :c:type:`winesync_wait_args`, which is + used as follows: + + .. list-table:: + + * - ``timeout`` + - Optional pointer to a 64-bit struct :c:type:`timespec` + (specified as an integer so that the structure has the same + size regardless of architecture). The timeout is specified in + absolute format, as measured against the MONOTONIC clock. 
If + the timeout is equal to or earlier than the current time, the + function returns immediately without sleeping. If ``timeout`` + is zero, i.e. NULL, the function will sleep until an object + is signaled, and will not fail with ``ETIMEDOUT``. + * - ``objs`` + - Pointer to an array of ``count`` 32-bit object identifiers + (specified as an integer so that the structure has the same + size regardless of architecture). If any identifier is + invalid, the function fails with ``EINVAL``. + * - ``count`` + - Number of object identifiers specified in the ``objs`` array. + * - ``owner`` + - Mutex owner identifier. If any object in ``objs`` is a mutex, + the ioctl will attempt to acquire that mutex on behalf of + ``owner``. If ``owner`` is zero, the ioctl fails with + ``EINVAL``. + * - ``index`` + - On success, contains the index (into ``objs``) of the object + which was signaled. If ``alert`` was signaled instead, + this contains ``count``. + * - ``alert`` + - Optional event object identifier. If nonzero, this specifies + an "alert" event object which, if signaled, will terminate + the wait. If nonzero, the identifier must point to a valid + event. + + This function attempts to acquire one of the given objects. If + unable to do so, it sleeps until an object becomes signaled, + subsequently acquiring it, or the timeout expires. In the latter + case the ioctl fails with ``ETIMEDOUT``. The function only acquires + one object, even if multiple objects are signaled. + + A semaphore is considered to be signaled if its count is nonzero, + and is acquired by decrementing its count by one. A mutex is + considered to be signaled if it is unowned or if its owner matches + the ``owner`` argument, and is acquired by incrementing its + recursion count by one and setting its owner to the ``owner`` + argument. An auto-reset event is acquired by designaling it; a + manual-reset event is not affected by acquisition. + + Acquisition is atomic and totally ordered with respect to other + operations on the same object. If two wait operations (with + different ``owner`` identifiers) are queued on the same mutex, only + one is signaled. If two wait operations are queued on the same + semaphore, and a value of one is posted to it, only one is signaled. + The order in which threads are signaled is not specified. + + If an inconsistent mutex is acquired, the ioctl fails with + ``EOWNERDEAD``. Although this is a failure return, the function may + otherwise be considered successful. The mutex is marked as owned by + the given owner (with a recursion count of 1) and as no longer + inconsistent, and ``index`` is still set to the index of the mutex. + + The ``alert`` argument is an "extra" event which can terminate the + wait, independently of all other objects. If members of ``objs`` and + ``alert`` are both simultaneously signaled, a member of ``objs`` + will always be given priority and acquired first. Aside from this, + for "any" waits, there is no difference between passing an event as + this parameter, and passing it as an additional object at the end of + the ``objs`` array. For "all" waits, there is an additional + difference, as described below. + + It is valid to pass the same object more than once, including by + passing the same event in the ``objs`` array and in ``alert``. If a + wakeup occurs due to that object being signaled, ``index`` is set to + the lowest index corresponding to that object. + + The function may fail with ``EINTR`` if a signal is received. + +.. 
c:macro:: WINESYNC_IOC_WAIT_ALL + + Poll on a list of objects, atomically acquiring all of them. Takes a + pointer to struct :c:type:`winesync_wait_args`, which is used + identically to ``WINESYNC_IOC_WAIT_ANY``, except that ``index`` is + always filled with zero on success if not woken via alert. + + This function attempts to simultaneously acquire all of the given + objects. If unable to do so, it sleeps until all objects become + simultaneously signaled, subsequently acquiring them, or the timeout + expires. In the latter case the ioctl fails with ``ETIMEDOUT`` and + no objects are modified. + + Objects may become signaled and subsequently designaled (through + acquisition by other threads) while this thread is sleeping. Only + once all objects are simultaneously signaled does the ioctl acquire + them and return. The entire acquisition is atomic and totally + ordered with respect to other operations on any of the given + objects. + + If an inconsistent mutex is acquired, the ioctl fails with + ``EOWNERDEAD``. Similarly to ``WINESYNC_IOC_WAIT_ANY``, all objects + are nevertheless marked as acquired. Note that if multiple mutex + objects are specified, there is no way to know which were marked as + inconsistent. + + As with "any" waits, the ``alert`` argument is an "extra" event + which can terminate the wait. Critically, however, an "all" wait + will succeed if all members in ``objs`` are signaled, *or* if + ``alert`` is signaled. In the latter case ``index`` will be set to + ``count``. As with "any" waits, if both conditions are filled, the + former takes priority, and objects in ``objs`` will be acquired. + + Unlike ``WINESYNC_IOC_WAIT_ANY``, it is not valid to pass the same + object more than once, nor is it valid to pass the same object in + ``objs`` and in ``alert`` If this is attempted, the function fails + with ``EINVAL``. diff --git a/MAINTAINERS b/MAINTAINERS index febbbd60e91f..aebac72e80dd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20859,6 +20859,15 @@ M: David Härdeman S: Maintained F: drivers/media/rc/winbond-cir.c +WINESYNC SYNCHRONIZATION PRIMITIVE DRIVER +M: Zebediah Figura +L: wine-devel@winehq.org +S: Supported +F: Documentation/userspace-api/winesync.rst +F: drivers/misc/winesync.c +F: include/uapi/linux/winesync.h +F: tools/testing/selftests/drivers/winesync/ + WINSYSTEMS EBC-C384 WATCHDOG DRIVER M: William Breathitt Gray L: linux-watchdog@vger.kernel.org diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0f5a49fc7c9e..e21e4424d6a2 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -470,6 +470,17 @@ config HISI_HIKEY_USB switching between the dual-role USB-C port and the USB-A host ports using only one USB controller. +config WINESYNC + tristate "Synchronization primitives for Wine" + help + This module provides kernel support for synchronization primitives + used by Wine. It is not a hardware driver. + + To compile this driver as a module, choose M here: the + module will be called winesync. + + If unsure, say N. 
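(For reference, a minimal user-space sketch of the ioctl interface documented above, in the spirit of the selftests later in this patch. It assumes the uapi header added by the patch is installed as <linux/winesync.h>; the owner value and timeout are illustrative, and error checking is omitted for brevity.)

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include <linux/winesync.h>

int main(void)
{
	struct winesync_sem_args sem_args = { .count = 1, .max = 2 };
	struct winesync_wait_args wait_args;
	struct timespec timeout;
	int fd, ret;

	fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY);

	/* Create a semaphore with an initial count of 1 and a maximum of 2;
	 * on success the object identifier is written back to sem_args.sem. */
	ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args);

	/* Wait on the semaphore with an absolute MONOTONIC timeout one
	 * second from now; index reports which object satisfied the wait. */
	clock_gettime(CLOCK_MONOTONIC, &timeout);
	timeout.tv_sec += 1;
	memset(&wait_args, 0, sizeof(wait_args));
	wait_args.timeout = (uintptr_t)&timeout;
	wait_args.objs = (uintptr_t)&sem_args.sem;
	wait_args.count = 1;
	wait_args.owner = 123;	/* arbitrary nonzero owner identifier */
	ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args);

	/* Post one count back, then delete the object. */
	sem_args.count = 1;
	ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args);
	ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem);

	close(fd);
	return ret;
}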
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index a086197af544..1fb39bc4637b 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -58,4 +58,5 @@ obj-$(CONFIG_HABANA_AI) += habanalabs/ obj-$(CONFIG_UACCE) += uacce/ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o +obj-$(CONFIG_WINESYNC) += winesync.o obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o diff --git a/drivers/misc/winesync.c b/drivers/misc/winesync.c new file mode 100644 index 000000000000..7a28f58dbbf2 --- /dev/null +++ b/drivers/misc/winesync.c @@ -0,0 +1,1212 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * winesync.c - Kernel driver for Wine synchronization primitives + * + * Copyright (C) 2021 Zebediah Figura + */ + +#include +#include +#include +#include +#include +#include + +#define WINESYNC_NAME "winesync" + +enum winesync_type { + WINESYNC_TYPE_SEM, + WINESYNC_TYPE_MUTEX, + WINESYNC_TYPE_EVENT, +}; + +struct winesync_obj { + struct rcu_head rhead; + struct kref refcount; + spinlock_t lock; + + /* + * any_waiters is protected by the object lock, but all_waiters is + * protected by the device wait_all_lock. + */ + struct list_head any_waiters; + struct list_head all_waiters; + + /* + * Hint describing how many tasks are queued on this object in a + * wait-all operation. + * + * Any time we do a wake, we may need to wake "all" waiters as well as + * "any" waiters. In order to atomically wake "all" waiters, we must + * lock all of the objects, and that means grabbing the wait_all_lock + * below (and, due to lock ordering rules, before locking this object). + * However, wait-all is a rare operation, and grabbing the wait-all + * lock for every wake would create unnecessary contention. Therefore we + * first check whether all_hint is zero, and, if it is, we skip trying + * to wake "all" waiters. + * + * This hint isn't protected by any lock. It might change during the + * course of a wake, but there's no meaningful race there; it's only a + * hint. + * + * Since wait requests must originate from user-space threads, we're + * limited here by PID_MAX_LIMIT, so there's no risk of saturation. + */ + atomic_t all_hint; + + enum winesync_type type; + + /* The following fields are protected by the object lock. */ + union { + struct { + __u32 count; + __u32 max; + } sem; + struct { + __u32 count; + __u32 owner; + bool ownerdead; + } mutex; + struct { + bool manual; + bool signaled; + } event; + } u; +}; + +struct winesync_q_entry { + struct list_head node; + struct winesync_q *q; + struct winesync_obj *obj; + __u32 index; +}; + +struct winesync_q { + struct task_struct *task; + __u32 owner; + + /* + * Protected via atomic_cmpxchg(). Only the thread that wins the + * compare-and-swap may actually change object states and wake this + * task. + */ + atomic_t signaled; + + bool all; + bool ownerdead; + __u32 count; + struct winesync_q_entry entries[]; +}; + +struct winesync_device { + /* + * Wait-all operations must atomically grab all objects, and be totally + * ordered with respect to each other and wait-any operations. If one + * thread is trying to acquire several objects, another thread cannot + * touch the object at the same time. + * + * We achieve this by grabbing multiple object locks at the same time. + * However, this creates a lock ordering problem. 
To solve that problem, + * wait_all_lock is taken first whenever multiple objects must be locked + * at the same time. + */ + spinlock_t wait_all_lock; + + struct xarray objects; +}; + +static struct winesync_obj *get_obj(struct winesync_device *dev, __u32 id) +{ + struct winesync_obj *obj; + + rcu_read_lock(); + obj = xa_load(&dev->objects, id); + if (obj && !kref_get_unless_zero(&obj->refcount)) + obj = NULL; + rcu_read_unlock(); + + return obj; +} + +static void destroy_obj(struct kref *ref) +{ + struct winesync_obj *obj = container_of(ref, struct winesync_obj, refcount); + + kfree_rcu(obj, rhead); +} + +static void put_obj(struct winesync_obj *obj) +{ + kref_put(&obj->refcount, destroy_obj); +} + +static struct winesync_obj *get_obj_typed(struct winesync_device *dev, __u32 id, + enum winesync_type type) +{ + struct winesync_obj *obj = get_obj(dev, id); + + if (obj && obj->type != type) { + put_obj(obj); + return NULL; + } + return obj; +} + +static int winesync_char_open(struct inode *inode, struct file *file) +{ + struct winesync_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_init(&dev->wait_all_lock); + + xa_init_flags(&dev->objects, XA_FLAGS_ALLOC); + + file->private_data = dev; + return nonseekable_open(inode, file); +} + +static int winesync_char_release(struct inode *inode, struct file *file) +{ + struct winesync_device *dev = file->private_data; + struct winesync_obj *obj; + unsigned long id; + + xa_for_each(&dev->objects, id, obj) + put_obj(obj); + + xa_destroy(&dev->objects); + + kfree(dev); + + return 0; +} + +static void init_obj(struct winesync_obj *obj) +{ + kref_init(&obj->refcount); + atomic_set(&obj->all_hint, 0); + spin_lock_init(&obj->lock); + INIT_LIST_HEAD(&obj->any_waiters); + INIT_LIST_HEAD(&obj->all_waiters); +} + +static bool is_signaled(struct winesync_obj *obj, __u32 owner) +{ + lockdep_assert_held(&obj->lock); + + switch (obj->type) { + case WINESYNC_TYPE_SEM: + return !!obj->u.sem.count; + case WINESYNC_TYPE_MUTEX: + if (obj->u.mutex.owner && obj->u.mutex.owner != owner) + return false; + return obj->u.mutex.count < UINT_MAX; + case WINESYNC_TYPE_EVENT: + return obj->u.event.signaled; + } + + WARN(1, "bad object type %#x\n", obj->type); + return false; +} + +/* + * "locked_obj" is an optional pointer to an object which is already locked and + * should not be locked again. This is necessary so that changing an object's + * state and waking it can be a single atomic operation. 
+ */ +static void try_wake_all(struct winesync_device *dev, struct winesync_q *q, + struct winesync_obj *locked_obj) +{ + __u32 count = q->count; + bool can_wake = true; + __u32 i; + + lockdep_assert_held(&dev->wait_all_lock); + if (locked_obj) + lockdep_assert_held(&locked_obj->lock); + + for (i = 0; i < count; i++) { + if (q->entries[i].obj != locked_obj) + spin_lock(&q->entries[i].obj->lock); + } + + for (i = 0; i < count; i++) { + if (!is_signaled(q->entries[i].obj, q->owner)) { + can_wake = false; + break; + } + } + + if (can_wake && atomic_cmpxchg(&q->signaled, -1, 0) == -1) { + for (i = 0; i < count; i++) { + struct winesync_obj *obj = q->entries[i].obj; + + switch (obj->type) { + case WINESYNC_TYPE_SEM: + obj->u.sem.count--; + break; + case WINESYNC_TYPE_MUTEX: + if (obj->u.mutex.ownerdead) + q->ownerdead = true; + obj->u.mutex.ownerdead = false; + obj->u.mutex.count++; + obj->u.mutex.owner = q->owner; + break; + case WINESYNC_TYPE_EVENT: + if (!obj->u.event.manual) + obj->u.event.signaled = false; + break; + } + } + wake_up_process(q->task); + } + + for (i = 0; i < count; i++) { + if (q->entries[i].obj != locked_obj) + spin_unlock(&q->entries[i].obj->lock); + } +} + +static void try_wake_all_obj(struct winesync_device *dev, + struct winesync_obj *obj) +{ + struct winesync_q_entry *entry; + + lockdep_assert_held(&dev->wait_all_lock); + lockdep_assert_held(&obj->lock); + + list_for_each_entry(entry, &obj->all_waiters, node) + try_wake_all(dev, entry->q, obj); +} + +static void try_wake_any_sem(struct winesync_obj *sem) +{ + struct winesync_q_entry *entry; + + lockdep_assert_held(&sem->lock); + + list_for_each_entry(entry, &sem->any_waiters, node) { + struct winesync_q *q = entry->q; + + if (!sem->u.sem.count) + break; + + if (atomic_cmpxchg(&q->signaled, -1, entry->index) == -1) { + sem->u.sem.count--; + wake_up_process(q->task); + } + } +} + +static void try_wake_any_mutex(struct winesync_obj *mutex) +{ + struct winesync_q_entry *entry; + + lockdep_assert_held(&mutex->lock); + + list_for_each_entry(entry, &mutex->any_waiters, node) { + struct winesync_q *q = entry->q; + + if (mutex->u.mutex.count == UINT_MAX) + break; + if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner) + continue; + + if (atomic_cmpxchg(&q->signaled, -1, entry->index) == -1) { + if (mutex->u.mutex.ownerdead) + q->ownerdead = true; + mutex->u.mutex.ownerdead = false; + mutex->u.mutex.count++; + mutex->u.mutex.owner = q->owner; + wake_up_process(q->task); + } + } +} + +static void try_wake_any_event(struct winesync_obj *event) +{ + struct winesync_q_entry *entry; + + lockdep_assert_held(&event->lock); + + list_for_each_entry(entry, &event->any_waiters, node) { + struct winesync_q *q = entry->q; + + if (!event->u.event.signaled) + break; + + if (atomic_cmpxchg(&q->signaled, -1, entry->index) == -1) { + if (!event->u.event.manual) + event->u.event.signaled = false; + wake_up_process(q->task); + } + } +} + +static int winesync_create_sem(struct winesync_device *dev, void __user *argp) +{ + struct winesync_sem_args __user *user_args = argp; + struct winesync_sem_args args; + struct winesync_obj *sem; + __u32 id; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + if (args.count > args.max) + return -EINVAL; + + sem = kzalloc(sizeof(*sem), GFP_KERNEL); + if (!sem) + return -ENOMEM; + + init_obj(sem); + sem->type = WINESYNC_TYPE_SEM; + sem->u.sem.count = args.count; + sem->u.sem.max = args.max; + + ret = xa_alloc(&dev->objects, &id, sem, xa_limit_32b, GFP_KERNEL); + if (ret < 0) 
{ + kfree(sem); + return ret; + } + + return put_user(id, &user_args->sem); +} + +static int winesync_create_mutex(struct winesync_device *dev, void __user *argp) +{ + struct winesync_mutex_args __user *user_args = argp; + struct winesync_mutex_args args; + struct winesync_obj *mutex; + __u32 id; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + if (!args.owner != !args.count) + return -EINVAL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return -ENOMEM; + + init_obj(mutex); + mutex->type = WINESYNC_TYPE_MUTEX; + mutex->u.mutex.count = args.count; + mutex->u.mutex.owner = args.owner; + + ret = xa_alloc(&dev->objects, &id, mutex, xa_limit_32b, GFP_KERNEL); + if (ret < 0) { + kfree(mutex); + return ret; + } + + return put_user(id, &user_args->mutex); +} + +static int winesync_create_event(struct winesync_device *dev, void __user *argp) +{ + struct winesync_event_args __user *user_args = argp; + struct winesync_event_args args; + struct winesync_obj *event; + __u32 id; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return -ENOMEM; + + init_obj(event); + event->type = WINESYNC_TYPE_EVENT; + event->u.event.manual = args.manual; + event->u.event.signaled = args.signaled; + + ret = xa_alloc(&dev->objects, &id, event, xa_limit_32b, GFP_KERNEL); + if (ret < 0) { + kfree(event); + return ret; + } + + return put_user(id, &user_args->event); +} + +static int winesync_delete(struct winesync_device *dev, void __user *argp) +{ + struct winesync_obj *obj; + __u32 id; + + if (get_user(id, (__u32 __user *)argp)) + return -EFAULT; + + obj = xa_erase(&dev->objects, id); + if (!obj) + return -EINVAL; + + put_obj(obj); + return 0; +} + +/* + * Actually change the semaphore state, returning -EOVERFLOW if it is made + * invalid. + */ +static int put_sem_state(struct winesync_obj *sem, __u32 count) +{ + lockdep_assert_held(&sem->lock); + + if (sem->u.sem.count + count < sem->u.sem.count || + sem->u.sem.count + count > sem->u.sem.max) + return -EOVERFLOW; + + sem->u.sem.count += count; + return 0; +} + +static int winesync_put_sem(struct winesync_device *dev, void __user *argp) +{ + struct winesync_sem_args __user *user_args = argp; + struct winesync_sem_args args; + struct winesync_obj *sem; + __u32 prev_count; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + sem = get_obj_typed(dev, args.sem, WINESYNC_TYPE_SEM); + if (!sem) + return -EINVAL; + + if (atomic_read(&sem->all_hint) > 0) { + spin_lock(&dev->wait_all_lock); + spin_lock(&sem->lock); + + prev_count = sem->u.sem.count; + ret = put_sem_state(sem, args.count); + if (!ret) { + try_wake_all_obj(dev, sem); + try_wake_any_sem(sem); + } + + spin_unlock(&sem->lock); + spin_unlock(&dev->wait_all_lock); + } else { + spin_lock(&sem->lock); + + prev_count = sem->u.sem.count; + ret = put_sem_state(sem, args.count); + if (!ret) + try_wake_any_sem(sem); + + spin_unlock(&sem->lock); + } + + put_obj(sem); + + if (!ret && put_user(prev_count, &user_args->count)) + ret = -EFAULT; + + return ret; +} + +/* + * Actually change the mutex state, returning -EPERM if not the owner. 
+ */ +static int put_mutex_state(struct winesync_obj *mutex, + const struct winesync_mutex_args *args) +{ + lockdep_assert_held(&mutex->lock); + + if (mutex->u.mutex.owner != args->owner) + return -EPERM; + + if (!--mutex->u.mutex.count) + mutex->u.mutex.owner = 0; + return 0; +} + +static int winesync_put_mutex(struct winesync_device *dev, void __user *argp) +{ + struct winesync_mutex_args __user *user_args = argp; + struct winesync_mutex_args args; + struct winesync_obj *mutex; + __u32 prev_count; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + if (!args.owner) + return -EINVAL; + + mutex = get_obj_typed(dev, args.mutex, WINESYNC_TYPE_MUTEX); + if (!mutex) + return -EINVAL; + + if (atomic_read(&mutex->all_hint) > 0) { + spin_lock(&dev->wait_all_lock); + spin_lock(&mutex->lock); + + prev_count = mutex->u.mutex.count; + ret = put_mutex_state(mutex, &args); + if (!ret) { + try_wake_all_obj(dev, mutex); + try_wake_any_mutex(mutex); + } + + spin_unlock(&mutex->lock); + spin_unlock(&dev->wait_all_lock); + } else { + spin_lock(&mutex->lock); + + prev_count = mutex->u.mutex.count; + ret = put_mutex_state(mutex, &args); + if (!ret) + try_wake_any_mutex(mutex); + + spin_unlock(&mutex->lock); + } + + put_obj(mutex); + + if (!ret && put_user(prev_count, &user_args->count)) + ret = -EFAULT; + + return ret; +} + +static int winesync_read_sem(struct winesync_device *dev, void __user *argp) +{ + struct winesync_sem_args __user *user_args = argp; + struct winesync_sem_args args; + struct winesync_obj *sem; + __u32 id; + + if (get_user(id, &user_args->sem)) + return -EFAULT; + + sem = get_obj_typed(dev, id, WINESYNC_TYPE_SEM); + if (!sem) + return -EINVAL; + + args.sem = id; + spin_lock(&sem->lock); + args.count = sem->u.sem.count; + args.max = sem->u.sem.max; + spin_unlock(&sem->lock); + + put_obj(sem); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return 0; +} + +static int winesync_read_mutex(struct winesync_device *dev, void __user *argp) +{ + struct winesync_mutex_args __user *user_args = argp; + struct winesync_mutex_args args; + struct winesync_obj *mutex; + __u32 id; + int ret; + + if (get_user(id, &user_args->mutex)) + return -EFAULT; + + mutex = get_obj_typed(dev, id, WINESYNC_TYPE_MUTEX); + if (!mutex) + return -EINVAL; + + args.mutex = id; + spin_lock(&mutex->lock); + args.count = mutex->u.mutex.count; + args.owner = mutex->u.mutex.owner; + ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0; + spin_unlock(&mutex->lock); + + put_obj(mutex); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return ret; +} + +static int winesync_read_event(struct winesync_device *dev, void __user *argp) +{ + struct winesync_event_args __user *user_args = argp; + struct winesync_event_args args; + struct winesync_obj *event; + __u32 id; + + if (get_user(id, &user_args->event)) + return -EFAULT; + + event = get_obj_typed(dev, id, WINESYNC_TYPE_EVENT); + if (!event) + return -EINVAL; + + args.event = id; + spin_lock(&event->lock); + args.manual = event->u.event.manual; + args.signaled = event->u.event.signaled; + spin_unlock(&event->lock); + + put_obj(event); + + if (copy_to_user(user_args, &args, sizeof(args))) + return -EFAULT; + return 0; +} + +/* + * Actually change the mutex state to mark its owner as dead. 
+ */ +static void put_mutex_ownerdead_state(struct winesync_obj *mutex) +{ + lockdep_assert_held(&mutex->lock); + + mutex->u.mutex.ownerdead = true; + mutex->u.mutex.owner = 0; + mutex->u.mutex.count = 0; +} + +static int winesync_kill_owner(struct winesync_device *dev, void __user *argp) +{ + struct winesync_obj *obj; + unsigned long id; + __u32 owner; + + if (get_user(owner, (__u32 __user *)argp)) + return -EFAULT; + if (!owner) + return -EINVAL; + + rcu_read_lock(); + + xa_for_each(&dev->objects, id, obj) { + if (!kref_get_unless_zero(&obj->refcount)) + continue; + + if (obj->type != WINESYNC_TYPE_MUTEX) { + put_obj(obj); + continue; + } + + if (atomic_read(&obj->all_hint) > 0) { + spin_lock(&dev->wait_all_lock); + spin_lock(&obj->lock); + + if (obj->u.mutex.owner == owner) { + put_mutex_ownerdead_state(obj); + try_wake_all_obj(dev, obj); + try_wake_any_mutex(obj); + } + + spin_unlock(&obj->lock); + spin_unlock(&dev->wait_all_lock); + } else { + spin_lock(&obj->lock); + + if (obj->u.mutex.owner == owner) { + put_mutex_ownerdead_state(obj); + try_wake_any_mutex(obj); + } + + spin_unlock(&obj->lock); + } + + put_obj(obj); + } + + rcu_read_unlock(); + + return 0; +} + +static int winesync_set_event(struct winesync_device *dev, void __user *argp, + bool pulse) +{ + struct winesync_event_args __user *user_args = argp; + struct winesync_event_args args; + struct winesync_obj *event; + bool prev_state; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + event = get_obj_typed(dev, args.event, WINESYNC_TYPE_EVENT); + if (!event) + return -EINVAL; + + if (atomic_read(&event->all_hint) > 0) { + spin_lock(&dev->wait_all_lock); + spin_lock(&event->lock); + + prev_state = event->u.event.signaled; + event->u.event.signaled = true; + try_wake_all_obj(dev, event); + try_wake_any_event(event); + if (pulse) + event->u.event.signaled = false; + + spin_unlock(&event->lock); + spin_unlock(&dev->wait_all_lock); + } else { + spin_lock(&event->lock); + + prev_state = event->u.event.signaled; + event->u.event.signaled = true; + try_wake_any_event(event); + if (pulse) + event->u.event.signaled = false; + + spin_unlock(&event->lock); + } + + put_obj(event); + + if (put_user(prev_state, &user_args->signaled)) + return -EFAULT; + + return 0; +} + +static int winesync_reset_event(struct winesync_device *dev, void __user *argp) +{ + struct winesync_event_args __user *user_args = argp; + struct winesync_event_args args; + struct winesync_obj *event; + bool prev_state; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + event = get_obj_typed(dev, args.event, WINESYNC_TYPE_EVENT); + if (!event) + return -EINVAL; + + spin_lock(&event->lock); + + prev_state = event->u.event.signaled; + event->u.event.signaled = false; + + spin_unlock(&event->lock); + + put_obj(event); + + if (put_user(prev_state, &user_args->signaled)) + return -EFAULT; + + return 0; +} + +static int winesync_schedule(const struct winesync_q *q, ktime_t *timeout) +{ + int ret = 0; + + do { + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + + set_current_state(TASK_INTERRUPTIBLE); + if (atomic_read(&q->signaled) != -1) { + ret = 0; + break; + } + ret = schedule_hrtimeout(timeout, HRTIMER_MODE_ABS); + } while (ret < 0); + __set_current_state(TASK_RUNNING); + + return ret; +} + +/* + * Allocate and initialize the winesync_q structure, but do not queue us yet. + * Also, calculate the relative timeout. 
+ */ +static int setup_wait(struct winesync_device *dev, + const struct winesync_wait_args *args, bool all, + ktime_t *ret_timeout, struct winesync_q **ret_q) +{ + const __u32 count = args->count; + struct winesync_q *q; + ktime_t timeout = 0; + __u32 total_count; + __u32 *ids; + __u32 i, j; + + if (!args->owner) + return -EINVAL; + + if (args->timeout) { + struct timespec64 to; + + if (get_timespec64(&to, u64_to_user_ptr(args->timeout))) + return -EFAULT; + if (!timespec64_valid(&to)) + return -EINVAL; + + timeout = timespec64_to_ns(&to); + } + + total_count = count; + if (args->alert) + total_count++; + + ids = kmalloc_array(total_count, sizeof(*ids), GFP_KERNEL); + if (!ids) + return -ENOMEM; + if (copy_from_user(ids, u64_to_user_ptr(args->objs), + array_size(count, sizeof(*ids)))) { + kfree(ids); + return -EFAULT; + } + if (args->alert) + ids[count] = args->alert; + + q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL); + if (!q) { + kfree(ids); + return -ENOMEM; + } + q->task = current; + q->owner = args->owner; + atomic_set(&q->signaled, -1); + q->all = all; + q->ownerdead = false; + q->count = count; + + for (i = 0; i < total_count; i++) { + struct winesync_q_entry *entry = &q->entries[i]; + struct winesync_obj *obj = get_obj(dev, ids[i]); + + if (!obj) + goto err; + + if (all) { + /* Check that the objects are all distinct. */ + for (j = 0; j < i; j++) { + if (obj == q->entries[j].obj) { + put_obj(obj); + goto err; + } + } + } + + entry->obj = obj; + entry->q = q; + entry->index = i; + } + + kfree(ids); + + *ret_q = q; + *ret_timeout = timeout; + return 0; + +err: + for (j = 0; j < i; j++) + put_obj(q->entries[j].obj); + kfree(ids); + kfree(q); + return -EINVAL; +} + +static void try_wake_any_obj(struct winesync_obj *obj) +{ + switch (obj->type) { + case WINESYNC_TYPE_SEM: + try_wake_any_sem(obj); + break; + case WINESYNC_TYPE_MUTEX: + try_wake_any_mutex(obj); + break; + case WINESYNC_TYPE_EVENT: + try_wake_any_event(obj); + break; + } +} + +static int winesync_wait_any(struct winesync_device *dev, void __user *argp) +{ + struct winesync_wait_args args; + struct winesync_q *q; + __u32 i, total_count; + ktime_t timeout; + int signaled; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + ret = setup_wait(dev, &args, false, &timeout, &q); + if (ret < 0) + return ret; + + total_count = args.count; + if (args.alert) + total_count++; + + /* queue ourselves */ + + for (i = 0; i < total_count; i++) { + struct winesync_q_entry *entry = &q->entries[i]; + struct winesync_obj *obj = entry->obj; + + spin_lock(&obj->lock); + list_add_tail(&entry->node, &obj->any_waiters); + spin_unlock(&obj->lock); + } + + /* + * Check if we are already signaled. + * + * Note that the API requires that normal objects are checked before + * the alert event. Hence we queue the alert event last, and check + * objects in order. + */ + + for (i = 0; i < total_count; i++) { + struct winesync_obj *obj = q->entries[i].obj; + + if (atomic_read(&q->signaled) != -1) + break; + + spin_lock(&obj->lock); + try_wake_any_obj(obj); + spin_unlock(&obj->lock); + } + + /* sleep */ + + ret = winesync_schedule(q, args.timeout ? 
&timeout : NULL); + + /* and finally, unqueue */ + + for (i = 0; i < total_count; i++) { + struct winesync_q_entry *entry = &q->entries[i]; + struct winesync_obj *obj = entry->obj; + + spin_lock(&obj->lock); + list_del(&entry->node); + spin_unlock(&obj->lock); + + put_obj(obj); + } + + signaled = atomic_read(&q->signaled); + if (signaled != -1) { + struct winesync_wait_args __user *user_args = argp; + + /* even if we caught a signal, we need to communicate success */ + ret = q->ownerdead ? -EOWNERDEAD : 0; + + if (put_user(signaled, &user_args->index)) + ret = -EFAULT; + } else if (!ret) { + ret = -ETIMEDOUT; + } + + kfree(q); + return ret; +} + +static int winesync_wait_all(struct winesync_device *dev, void __user *argp) +{ + struct winesync_wait_args args; + struct winesync_q *q; + ktime_t timeout; + int signaled; + __u32 i; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) + return -EFAULT; + + ret = setup_wait(dev, &args, true, &timeout, &q); + if (ret < 0) + return ret; + + /* queue ourselves */ + + spin_lock(&dev->wait_all_lock); + + for (i = 0; i < args.count; i++) { + struct winesync_q_entry *entry = &q->entries[i]; + struct winesync_obj *obj = entry->obj; + + atomic_inc(&obj->all_hint); + + /* + * obj->all_waiters is protected by dev->wait_all_lock rather + * than obj->lock, so there is no need to acquire it here. + */ + list_add_tail(&entry->node, &obj->all_waiters); + } + if (args.alert) { + struct winesync_q_entry *entry = &q->entries[args.count]; + struct winesync_obj *obj = entry->obj; + + spin_lock(&obj->lock); + list_add_tail(&entry->node, &obj->any_waiters); + spin_unlock(&obj->lock); + } + + /* check if we are already signaled */ + + try_wake_all(dev, q, NULL); + + spin_unlock(&dev->wait_all_lock); + + /* + * Check if the alert event is signaled, making sure to do so only + * after checking if the other objects are signaled. + */ + + if (args.alert) { + struct winesync_obj *obj = q->entries[args.count].obj; + + if (atomic_read(&q->signaled) == -1) { + spin_lock(&obj->lock); + try_wake_any_obj(obj); + spin_unlock(&obj->lock); + } + } + + /* sleep */ + + ret = winesync_schedule(q, args.timeout ? &timeout : NULL); + + /* and finally, unqueue */ + + spin_lock(&dev->wait_all_lock); + + for (i = 0; i < args.count; i++) { + struct winesync_q_entry *entry = &q->entries[i]; + struct winesync_obj *obj = entry->obj; + + /* + * obj->all_waiters is protected by dev->wait_all_lock rather + * than obj->lock, so there is no need to acquire it here. + */ + list_del(&entry->node); + + atomic_dec(&obj->all_hint); + + put_obj(obj); + } + if (args.alert) { + struct winesync_q_entry *entry = &q->entries[args.count]; + struct winesync_obj *obj = entry->obj; + + spin_lock(&obj->lock); + list_del(&entry->node); + spin_unlock(&obj->lock); + + put_obj(obj); + } + + spin_unlock(&dev->wait_all_lock); + + signaled = atomic_read(&q->signaled); + if (signaled != -1) { + struct winesync_wait_args __user *user_args = argp; + + /* even if we caught a signal, we need to communicate success */ + ret = q->ownerdead ? 
-EOWNERDEAD : 0; + + if (put_user(signaled, &user_args->index)) + ret = -EFAULT; + } else if (!ret) { + ret = -ETIMEDOUT; + } + + kfree(q); + return ret; +} + +static long winesync_char_ioctl(struct file *file, unsigned int cmd, + unsigned long parm) +{ + struct winesync_device *dev = file->private_data; + void __user *argp = (void __user *)parm; + + switch (cmd) { + case WINESYNC_IOC_CREATE_EVENT: + return winesync_create_event(dev, argp); + case WINESYNC_IOC_CREATE_MUTEX: + return winesync_create_mutex(dev, argp); + case WINESYNC_IOC_CREATE_SEM: + return winesync_create_sem(dev, argp); + case WINESYNC_IOC_DELETE: + return winesync_delete(dev, argp); + case WINESYNC_IOC_KILL_OWNER: + return winesync_kill_owner(dev, argp); + case WINESYNC_IOC_PULSE_EVENT: + return winesync_set_event(dev, argp, true); + case WINESYNC_IOC_PUT_MUTEX: + return winesync_put_mutex(dev, argp); + case WINESYNC_IOC_PUT_SEM: + return winesync_put_sem(dev, argp); + case WINESYNC_IOC_READ_EVENT: + return winesync_read_event(dev, argp); + case WINESYNC_IOC_READ_MUTEX: + return winesync_read_mutex(dev, argp); + case WINESYNC_IOC_READ_SEM: + return winesync_read_sem(dev, argp); + case WINESYNC_IOC_RESET_EVENT: + return winesync_reset_event(dev, argp); + case WINESYNC_IOC_SET_EVENT: + return winesync_set_event(dev, argp, false); + case WINESYNC_IOC_WAIT_ALL: + return winesync_wait_all(dev, argp); + case WINESYNC_IOC_WAIT_ANY: + return winesync_wait_any(dev, argp); + default: + return -ENOSYS; + } +} + +static const struct file_operations winesync_fops = { + .owner = THIS_MODULE, + .open = winesync_char_open, + .release = winesync_char_release, + .unlocked_ioctl = winesync_char_ioctl, + .compat_ioctl = winesync_char_ioctl, + .llseek = no_llseek, +}; + +static struct miscdevice winesync_misc = { + .minor = WINESYNC_MINOR, + .name = WINESYNC_NAME, + .fops = &winesync_fops, +}; + +static int __init winesync_init(void) +{ + return misc_register(&winesync_misc); +} + +static void __exit winesync_exit(void) +{ + misc_deregister(&winesync_misc); +} + +module_init(winesync_init); +module_exit(winesync_exit); + +MODULE_AUTHOR("Zebediah Figura"); +MODULE_DESCRIPTION("Kernel driver for Wine synchronization primitives"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("devname:" WINESYNC_NAME); +MODULE_ALIAS_MISCDEV(WINESYNC_MINOR); diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 0676f18093f9..350aecfcfb29 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -71,6 +71,7 @@ #define USERIO_MINOR 240 #define VHOST_VSOCK_MINOR 241 #define RFKILL_MINOR 242 +#define WINESYNC_MINOR 243 #define MISC_DYNAMIC_MINOR 255 struct device; diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h index 71a5df8d2689..d375ab21cbf8 100644 --- a/include/uapi/linux/futex.h +++ b/include/uapi/linux/futex.h @@ -22,6 +22,7 @@ #define FUTEX_WAIT_REQUEUE_PI 11 #define FUTEX_CMP_REQUEUE_PI 12 #define FUTEX_LOCK_PI2 13 +#define FUTEX_WAIT_MULTIPLE 31 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -68,6 +69,18 @@ struct futex_waitv { __u32 __reserved; }; +/** + * struct futex_wait_block - Block of futexes to be waited for + * @uaddr: User address of the futex + * @val: Futex value expected by userspace + * @bitset: Bitset for the optional bitmasked wakeup + */ +struct futex_wait_block { + __u32 __user *uaddr; + __u32 val; + __u32 bitset; +}; + /* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. 
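(The futex.h hunk above also reintroduces the legacy FUTEX_WAIT_MULTIPLE operation, opcode 31, together with struct futex_wait_block; the syscall wiring in kernel/futex/syscalls.c follows below. As a rough user-space sketch — the wrapper name is made up, the bitset value is illustrative, and the timeout is passed relative to the current time, as with FUTEX_WAIT — a call might look like this:)

#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <linux/futex.h>

/* Wait until either of two futex words no longer holds its expected
 * value, or the (relative) timeout expires. */
static long wait_on_two_futexes(uint32_t *addr_a, uint32_t val_a,
				uint32_t *addr_b, uint32_t val_b,
				struct timespec *timeout)
{
	struct futex_wait_block block[2] = {
		{ .uaddr = addr_a, .val = val_a, .bitset = ~0u },
		{ .uaddr = addr_b, .val = val_b, .bitset = ~0u },
	};

	/* For opcode 31, uaddr points at the block array and val carries
	 * the number of blocks. */
	return syscall(SYS_futex, block, FUTEX_WAIT_MULTIPLE, 2,
		       timeout, NULL, 0);
}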
diff --git a/include/uapi/linux/winesync.h b/include/uapi/linux/winesync.h new file mode 100644 index 000000000000..5b4e369f7469 --- /dev/null +++ b/include/uapi/linux/winesync.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Kernel support for Wine synchronization primitives + * + * Copyright (C) 2021 Zebediah Figura + */ + +#ifndef __LINUX_WINESYNC_H +#define __LINUX_WINESYNC_H + +#include + +struct winesync_sem_args { + __u32 sem; + __u32 count; + __u32 max; +}; + +struct winesync_mutex_args { + __u32 mutex; + __u32 owner; + __u32 count; +}; + +struct winesync_event_args { + __u32 event; + __u32 manual; + __u32 signaled; +}; + +struct winesync_wait_args { + __u64 timeout; + __u64 objs; + __u32 count; + __u32 owner; + __u32 index; + __u32 alert; +}; + +#define WINESYNC_IOC_BASE 0xf7 + +#define WINESYNC_IOC_CREATE_SEM _IOWR(WINESYNC_IOC_BASE, 0, \ + struct winesync_sem_args) +#define WINESYNC_IOC_DELETE _IOW (WINESYNC_IOC_BASE, 1, __u32) +#define WINESYNC_IOC_PUT_SEM _IOWR(WINESYNC_IOC_BASE, 2, \ + struct winesync_sem_args) +#define WINESYNC_IOC_WAIT_ANY _IOWR(WINESYNC_IOC_BASE, 3, \ + struct winesync_wait_args) +#define WINESYNC_IOC_WAIT_ALL _IOWR(WINESYNC_IOC_BASE, 4, \ + struct winesync_wait_args) +#define WINESYNC_IOC_CREATE_MUTEX _IOWR(WINESYNC_IOC_BASE, 5, \ + struct winesync_mutex_args) +#define WINESYNC_IOC_PUT_MUTEX _IOWR(WINESYNC_IOC_BASE, 6, \ + struct winesync_mutex_args) +#define WINESYNC_IOC_KILL_OWNER _IOW (WINESYNC_IOC_BASE, 7, __u32) +#define WINESYNC_IOC_READ_SEM _IOWR(WINESYNC_IOC_BASE, 8, \ + struct winesync_sem_args) +#define WINESYNC_IOC_READ_MUTEX _IOWR(WINESYNC_IOC_BASE, 9, \ + struct winesync_mutex_args) +#define WINESYNC_IOC_CREATE_EVENT _IOWR(WINESYNC_IOC_BASE, 10, \ + struct winesync_event_args) +#define WINESYNC_IOC_SET_EVENT _IOWR(WINESYNC_IOC_BASE, 11, \ + struct winesync_event_args) +#define WINESYNC_IOC_RESET_EVENT _IOWR(WINESYNC_IOC_BASE, 12, \ + struct winesync_event_args) +#define WINESYNC_IOC_PULSE_EVENT _IOWR(WINESYNC_IOC_BASE, 13, \ + struct winesync_event_args) +#define WINESYNC_IOC_READ_EVENT _IOWR(WINESYNC_IOC_BASE, 14, \ + struct winesync_event_args) + +#endif diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c index 086a22d1adb7..c6f5f1e84e09 100644 --- a/kernel/futex/syscalls.c +++ b/kernel/futex/syscalls.c @@ -142,6 +142,7 @@ static __always_inline bool futex_cmd_has_timeout(u32 cmd) case FUTEX_LOCK_PI2: case FUTEX_WAIT_BITSET: case FUTEX_WAIT_REQUEUE_PI: + case FUTEX_WAIT_MULTIPLE: return true; } return false; @@ -154,13 +155,79 @@ futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) return -EINVAL; *t = timespec64_to_ktime(*ts); - if (cmd == FUTEX_WAIT) + if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE) *t = ktime_add_safe(ktime_get(), *t); else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t); return 0; } +/** + * futex_read_wait_block - Read an array of futex_wait_block from userspace + * @uaddr: Userspace address of the block + * @count: Number of blocks to be read + * + * This function creates and allocate an array of futex_q (we zero it to + * initialize the fields) and then, for each futex_wait_block element from + * userspace, fill a futex_q element with proper values. 
+ */ +inline struct futex_vector *futex_read_wait_block(u32 __user *uaddr, u32 count) +{ + unsigned int i; + struct futex_vector *futexv; + struct futex_wait_block fwb; + struct futex_wait_block __user *entry = + (struct futex_wait_block __user *)uaddr; + + if (!count || count > FUTEX_WAITV_MAX) + return ERR_PTR(-EINVAL); + + futexv = kcalloc(count, sizeof(*futexv), GFP_KERNEL); + if (!futexv) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < count; i++) { + if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) { + kfree(futexv); + return ERR_PTR(-EFAULT); + } + + futexv[i].w.flags = FUTEX_32; + futexv[i].w.val = fwb.val; + futexv[i].w.uaddr = (uintptr_t) (fwb.uaddr); + futexv[i].q = futex_q_init; + } + + return futexv; +} + +int futex_wait_multiple(struct futex_vector *vs, unsigned int count, + struct hrtimer_sleeper *to); + +int futex_opcode_31(ktime_t *abs_time, u32 __user *uaddr, int count) +{ + int ret; + struct futex_vector *vs; + struct hrtimer_sleeper *to = NULL, timeout; + + to = futex_setup_timer(abs_time, &timeout, 0, 0); + + vs = futex_read_wait_block(uaddr, count); + + if (IS_ERR(vs)) + return PTR_ERR(vs); + + ret = futex_wait_multiple(vs, count, abs_time ? to : NULL); + kfree(vs); + + if (to) { + hrtimer_cancel(&to->timer); + destroy_hrtimer_on_stack(&to->timer); + } + + return ret; +} + SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, const struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, u32, val3) @@ -180,6 +247,9 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, tp = &t; } + if (cmd == FUTEX_WAIT_MULTIPLE) + return futex_opcode_31(tp, uaddr, val); + return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } @@ -370,6 +440,9 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, tp = &t; } + if (cmd == FUTEX_WAIT_MULTIPLE) + return futex_opcode_31(tp, uaddr, val); + return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } #endif /* CONFIG_COMPAT_32BIT_TIME */ diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index d08fe4cfe811..43643147e91a 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -10,6 +10,7 @@ TARGETS += core TARGETS += cpufreq TARGETS += cpu-hotplug TARGETS += drivers/dma-buf +TARGETS += drivers/winesync TARGETS += efivarfs TARGETS += exec TARGETS += filesystems diff --git a/tools/testing/selftests/drivers/winesync/Makefile b/tools/testing/selftests/drivers/winesync/Makefile new file mode 100644 index 000000000000..43b39fdeea10 --- /dev/null +++ b/tools/testing/selftests/drivers/winesync/Makefile @@ -0,0 +1,8 @@ +# SPDX-LICENSE-IDENTIFIER: GPL-2.0-only +TEST_GEN_PROGS := winesync + +top_srcdir =../../../../.. +CFLAGS += -I$(top_srcdir)/usr/include +LDLIBS += -lpthread + +include ../../lib.mk diff --git a/tools/testing/selftests/drivers/winesync/config b/tools/testing/selftests/drivers/winesync/config new file mode 100644 index 000000000000..60539c826d06 --- /dev/null +++ b/tools/testing/selftests/drivers/winesync/config @@ -0,0 +1 @@ +CONFIG_WINESYNC=y diff --git a/tools/testing/selftests/drivers/winesync/winesync.c b/tools/testing/selftests/drivers/winesync/winesync.c new file mode 100644 index 000000000000..169e922484b0 --- /dev/null +++ b/tools/testing/selftests/drivers/winesync/winesync.c @@ -0,0 +1,1479 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Various unit tests for the "winesync" synchronization primitive driver. 
+ * + * Copyright (C) 2021 Zebediah Figura + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "../../kselftest_harness.h" + +static int read_sem_state(int fd, __u32 sem, __u32 *count, __u32 *max) +{ + struct winesync_sem_args args; + int ret; + + args.sem = sem; + args.count = 0xdeadbeef; + args.max = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &args); + *count = args.count; + *max = args.max; + return ret; +} + +#define check_sem_state(fd, sem, count, max) \ + ({ \ + __u32 __count, __max; \ + int ret = read_sem_state((fd), (sem), &__count, &__max); \ + EXPECT_EQ(0, ret); \ + EXPECT_EQ((count), __count); \ + EXPECT_EQ((max), __max); \ + }) + +static int put_sem(int fd, __u32 sem, __u32 *count) +{ + struct winesync_sem_args args; + int ret; + + args.sem = sem; + args.count = *count; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &args); + *count = args.count; + return ret; +} + +static int read_mutex_state(int fd, __u32 mutex, __u32 *count, __u32 *owner) +{ + struct winesync_mutex_args args; + int ret; + + args.mutex = mutex; + args.count = 0xdeadbeef; + args.owner = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &args); + *count = args.count; + *owner = args.owner; + return ret; +} + +#define check_mutex_state(fd, mutex, count, owner) \ + ({ \ + __u32 __count, __owner; \ + int ret = read_mutex_state((fd), (mutex), &__count, &__owner); \ + EXPECT_EQ(0, ret); \ + EXPECT_EQ((count), __count); \ + EXPECT_EQ((owner), __owner); \ + }) + +static int put_mutex(int fd, __u32 mutex, __u32 owner, __u32 *count) +{ + struct winesync_mutex_args args; + int ret; + + args.mutex = mutex; + args.owner = owner; + args.count = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &args); + *count = args.count; + return ret; +} + +static int read_event_state(int fd, __u32 event, __u32 *signaled, __u32 *manual) +{ + struct winesync_event_args args; + int ret; + + args.event = event; + args.signaled = 0xdeadbeef; + args.manual = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_EVENT, &args); + *signaled = args.signaled; + *manual = args.manual; + return ret; +} + +#define check_event_state(fd, event, signaled, manual) \ + ({ \ + __u32 __signaled, __manual; \ + int ret = read_event_state((fd), (event), \ + &__signaled, &__manual); \ + EXPECT_EQ(0, ret); \ + EXPECT_EQ((signaled), __signaled); \ + EXPECT_EQ((manual), __manual); \ + }) + +static int wait_objs(int fd, unsigned long request, __u32 count, + const __u32 *objs, __u32 owner, __u32 alert, __u32 *index) +{ + struct winesync_wait_args args = {0}; + struct timespec timeout; + int ret; + + clock_gettime(CLOCK_MONOTONIC, &timeout); + + args.timeout = (uintptr_t)&timeout; + args.count = count; + args.objs = (uintptr_t)objs; + args.owner = owner; + args.index = 0xdeadbeef; + args.alert = alert; + ret = ioctl(fd, request, &args); + *index = args.index; + return ret; +} + +static int wait_any(int fd, __u32 count, const __u32 *objs, + __u32 owner, __u32 *index) +{ + return wait_objs(fd, WINESYNC_IOC_WAIT_ANY, + count, objs, owner, 0, index); +} + +static int wait_all(int fd, __u32 count, const __u32 *objs, + __u32 owner, __u32 *index) +{ + return wait_objs(fd, WINESYNC_IOC_WAIT_ALL, + count, objs, owner, 0, index); +} + +static int wait_any_alert(int fd, __u32 count, const __u32 *objs, + __u32 owner, __u32 alert, __u32 *index) +{ + return wait_objs(fd, WINESYNC_IOC_WAIT_ANY, + count, objs, owner, alert, index); +} + +static int wait_all_alert(int fd, __u32 count, const __u32 *objs, + __u32 owner, __u32 alert, 
+{
+	return wait_objs(fd, WINESYNC_IOC_WAIT_ALL,
+			 count, objs, owner, alert, index);
+}
+
+TEST(semaphore_state)
+{
+	struct winesync_sem_args sem_args;
+	struct timespec timeout;
+	__u32 sem, count, index;
+	int fd, ret;
+
+	clock_gettime(CLOCK_MONOTONIC, &timeout);
+
+	fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY);
+	ASSERT_LE(0, fd);
+
+	sem_args.count = 3;
+	sem_args.max = 2;
+	sem_args.sem = 0xdeadbeef;
+	ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno);
+
+	sem_args.count = 2;
+	sem_args.max = 2;
+	sem_args.sem = 0xdeadbeef;
+	ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args);
+	EXPECT_EQ(0, ret);
+	EXPECT_NE(0xdeadbeef, sem_args.sem);
+	sem = sem_args.sem;
+	check_sem_state(fd, sem, 2, 2);
+
+	count = 0;
+	ret = put_sem(fd, sem, &count);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(2, count);
+	check_sem_state(fd, sem, 2, 2);
+
+	count = 1;
+	ret = put_sem(fd, sem, &count);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EOVERFLOW, errno);
+	check_sem_state(fd, sem, 2, 2);
+
+	ret = wait_any(fd, 1, &sem, 123, &index);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, index);
+	check_sem_state(fd, sem, 1, 2);
+
+	ret = wait_any(fd, 1, &sem, 123, &index);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, index);
+	check_sem_state(fd, sem, 0, 2);
+
+	ret = wait_any(fd, 1, &sem, 123, &index);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(ETIMEDOUT, errno);
+
+	count = 3;
+	ret = put_sem(fd, sem, &count);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EOVERFLOW, errno);
+	check_sem_state(fd, sem, 0, 2);
+
+	count = 2;
+	ret = put_sem(fd, sem, &count);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, count);
+	check_sem_state(fd, sem, 2, 2);
+
+	ret = wait_any(fd, 1, &sem, 123, &index);
+	EXPECT_EQ(0, ret);
+	ret = wait_any(fd, 1, &sem, 123, &index);
+	EXPECT_EQ(0, ret);
+
+	count = 1;
+	ret = put_sem(fd, sem, &count);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, count);
+	check_sem_state(fd, sem, 1, 2);
+
+	ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem);
+	EXPECT_EQ(0, ret);
+
+	close(fd);
+}
+
+TEST(mutex_state)
+{
+	struct winesync_mutex_args mutex_args;
+	__u32 mutex, owner, count, index;
+	struct timespec timeout;
+	int fd, ret;
+
+	clock_gettime(CLOCK_MONOTONIC, &timeout);
+
+	fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY);
+	ASSERT_LE(0, fd);
+
+	mutex_args.owner = 123;
+	mutex_args.count = 0;
+	ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno);
+
+	mutex_args.owner = 0;
+	mutex_args.count = 2;
+	ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno);
+
+	mutex_args.owner = 123;
+	mutex_args.count = 2;
+	mutex_args.mutex = 0xdeadbeef;
+	ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args);
+	EXPECT_EQ(0, ret);
+	EXPECT_NE(0xdeadbeef, mutex_args.mutex);
+	mutex = mutex_args.mutex;
+	check_mutex_state(fd, mutex, 2, 123);
+
+	ret = put_mutex(fd, mutex, 0, &count);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EINVAL, errno);
+
+	ret = put_mutex(fd, mutex, 456, &count);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EPERM, errno);
+	check_mutex_state(fd, mutex, 2, 123);
+
+	ret = put_mutex(fd, mutex, 123, &count);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(2, count);
+	check_mutex_state(fd, mutex, 1, 123);
+
+	ret = put_mutex(fd, mutex, 123, &count);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(1, count);
+	check_mutex_state(fd, mutex, 0, 0);
+
+	ret = put_mutex(fd, mutex, 123, &count);
+	EXPECT_EQ(-1, ret);
+	EXPECT_EQ(EPERM, errno);
+
+	ret = wait_any(fd, 1, &mutex, 456, &index);
+	EXPECT_EQ(0, ret);
+	EXPECT_EQ(0, index);
+	check_mutex_state(fd, mutex, 1, 456);
+
+	ret =
wait_any(fd, 1, &mutex, 456, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_mutex_state(fd, mutex, 2, 456); + + ret = put_mutex(fd, mutex, 456, &count); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, count); + check_mutex_state(fd, mutex, 1, 456); + + ret = wait_any(fd, 1, &mutex, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + owner = 0; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + owner = 123; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(0, ret); + check_mutex_state(fd, mutex, 1, 456); + + owner = 456; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(0, ret); + + mutex_args.count = 0xdeadbeef; + mutex_args.owner = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(0, mutex_args.count); + EXPECT_EQ(0, mutex_args.owner); + + mutex_args.count = 0xdeadbeef; + mutex_args.owner = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(0, mutex_args.count); + EXPECT_EQ(0, mutex_args.owner); + + ret = wait_any(fd, 1, &mutex, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(0, index); + check_mutex_state(fd, mutex, 1, 123); + + owner = 123; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(0, ret); + + mutex_args.count = 0xdeadbeef; + mutex_args.owner = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(0, mutex_args.count); + EXPECT_EQ(0, mutex_args.owner); + + ret = wait_any(fd, 1, &mutex, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(0, index); + check_mutex_state(fd, mutex, 1, 123); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex); + EXPECT_EQ(0, ret); + + mutex_args.owner = 0; + mutex_args.count = 0; + mutex_args.mutex = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, mutex_args.mutex); + mutex = mutex_args.mutex; + check_mutex_state(fd, mutex, 0, 0); + + ret = wait_any(fd, 1, &mutex, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_mutex_state(fd, mutex, 1, 123); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(manual_event_state) +{ + struct winesync_event_args event_args; + __u32 index; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + event_args.manual = 1; + event_args.signaled = 0; + event_args.event = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, event_args.event); + check_event_state(fd, event_args.event, 0, 1); + + event_args.signaled = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 1, 1); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + check_event_state(fd, event_args.event, 1, 1); + + ret = wait_any(fd, 1, &event_args.event, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_event_state(fd, event_args.event, 1, 1); + + event_args.signaled = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + 
check_event_state(fd, event_args.event, 0, 1); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 1); + + ret = wait_any(fd, 1, &event_args.event, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + check_event_state(fd, event_args.event, 0, 1); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 1); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(auto_event_state) +{ + struct winesync_event_args event_args; + __u32 index; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + event_args.manual = 0; + event_args.signaled = 1; + event_args.event = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, event_args.event); + + check_event_state(fd, event_args.event, 1, 0); + + event_args.signaled = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + check_event_state(fd, event_args.event, 1, 0); + + ret = wait_any(fd, 1, &event_args.event, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_event_state(fd, event_args.event, 0, 0); + + event_args.signaled = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 0); + + ret = wait_any(fd, 1, &event_args.event, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + check_event_state(fd, event_args.event, 0, 0); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 0); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(test_wait_any) +{ + struct winesync_mutex_args mutex_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + __u32 objs[2], owner, index; + struct timespec timeout; + int fd, ret; + + clock_gettime(CLOCK_MONOTONIC, &timeout); + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 2; + sem_args.max = 3; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + + mutex_args.owner = 0; + mutex_args.count = 0; + mutex_args.mutex = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, mutex_args.mutex); + + objs[0] = sem_args.sem; + objs[1] = mutex_args.mutex; + + ret = wait_any(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 1, 3); + check_mutex_state(fd, mutex_args.mutex, 0, 0); + + ret = wait_any(fd, 2, objs, 123, 
&index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 0, 0); + + ret = wait_any(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 123); + + sem_args.count = 1; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + + ret = wait_any(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 123); + + ret = wait_any(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 2, 123); + + ret = wait_any(fd, 2, objs, 456, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + owner = 123; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(0, ret); + + ret = wait_any(fd, 2, objs, 456, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + EXPECT_EQ(1, index); + + ret = wait_any(fd, 2, objs, 456, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, index); + + /* test waiting on the same object twice */ + sem_args.count = 2; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + + objs[0] = objs[1] = sem_args.sem; + ret = wait_any(fd, 2, objs, 456, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, wait_args.index); + check_sem_state(fd, sem_args.sem, 1, 3); + + ret = wait_any(fd, 0, NULL, 456, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(test_wait_all) +{ + struct winesync_event_args event_args = {0}; + struct winesync_mutex_args mutex_args = {0}; + struct winesync_sem_args sem_args = {0}; + __u32 objs[2], owner, index; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 2; + sem_args.max = 3; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + + mutex_args.owner = 0; + mutex_args.count = 0; + mutex_args.mutex = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, mutex_args.mutex); + + event_args.manual = true; + event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + objs[0] = sem_args.sem; + objs[1] = mutex_args.mutex; + + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 1, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 123); + + ret = wait_all(fd, 2, objs, 456, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + check_sem_state(fd, sem_args.sem, 1, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 123); + + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 2, 123); + + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + check_sem_state(fd, sem_args.sem, 0, 3); + check_mutex_state(fd, mutex_args.mutex, 2, 123); + + sem_args.count = 3; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, 
&sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 2, 3); + check_mutex_state(fd, mutex_args.mutex, 3, 123); + + owner = 123; + ret = ioctl(fd, WINESYNC_IOC_KILL_OWNER, &owner); + EXPECT_EQ(0, ret); + + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EOWNERDEAD, errno); + check_sem_state(fd, sem_args.sem, 1, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 123); + + objs[0] = sem_args.sem; + objs[1] = event_args.event; + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + check_sem_state(fd, sem_args.sem, 0, 3); + check_event_state(fd, event_args.event, 1, 1); + + /* test waiting on the same object twice */ + objs[0] = objs[1] = sem_args.sem; + ret = wait_all(fd, 2, objs, 123, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(invalid_objects) +{ + struct winesync_event_args event_args = {0}; + struct winesync_mutex_args mutex_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + __u32 objs[2] = {0}; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + wait_args.objs = (uintptr_t)objs; + wait_args.count = 1; + ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[0]); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + sem_args.max = 1; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + + mutex_args.mutex = sem_args.sem; + ret = ioctl(fd, WINESYNC_IOC_PUT_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_MUTEX, &mutex_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + event_args.event = sem_args.sem; + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_EVENT, &event_args); + EXPECT_EQ(-1, ret); 
+ EXPECT_EQ(EINVAL, errno); + + objs[0] = sem_args.sem; + objs[1] = sem_args.sem + 1; + wait_args.count = 2; + ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + objs[0] = sem_args.sem + 1; + objs[1] = sem_args.sem; + ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + ret = ioctl(fd, WINESYNC_IOC_WAIT_ALL, &wait_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); + EXPECT_EQ(0, ret); + + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + + sem_args.sem = mutex_args.mutex; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_READ_SEM, &sem_args); + EXPECT_EQ(-1, ret); + EXPECT_EQ(EINVAL, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + + close(fd); +} + +struct wake_args +{ + int fd; + __u32 obj; +}; + +struct wait_args +{ + int fd; + unsigned long request; + struct winesync_wait_args *args; + int ret; + int err; +}; + +static void *wait_thread(void *arg) +{ + struct wait_args *args = arg; + + args->ret = ioctl(args->fd, args->request, args->args); + args->err = errno; + return NULL; +} + +static void get_abs_timeout(struct timespec *timeout, clockid_t clock, + unsigned int ms) +{ + clock_gettime(clock, timeout); + timeout->tv_nsec += ms * 1000000; + timeout->tv_sec += (timeout->tv_nsec / 1000000000); + timeout->tv_nsec %= 1000000000; +} + +static int wait_for_thread(pthread_t thread, unsigned int ms) +{ + struct timespec timeout; + get_abs_timeout(&timeout, CLOCK_REALTIME, ms); + return pthread_timedjoin_np(thread, NULL, &timeout); +} + +TEST(wake_any) +{ + struct winesync_event_args event_args = {0}; + struct winesync_mutex_args mutex_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + struct wait_args thread_args; + __u32 objs[2], count, index; + struct timespec timeout; + pthread_t thread; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 0; + sem_args.max = 3; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + + mutex_args.owner = 123; + mutex_args.count = 1; + mutex_args.mutex = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, mutex_args.mutex); + + objs[0] = sem_args.sem; + objs[1] = mutex_args.mutex; + + /* test waking the semaphore */ + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + wait_args.timeout = (uintptr_t)&timeout; + wait_args.objs = (uintptr_t)objs; + wait_args.count = 2; + wait_args.owner = 456; + wait_args.index = 0xdeadbeef; + thread_args.fd = fd; + thread_args.args = &wait_args; + thread_args.request = WINESYNC_IOC_WAIT_ANY; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + sem_args.count = 1; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + check_sem_state(fd, sem_args.sem, 0, 3); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(0, wait_args.index); + + 
/* test waking the mutex */ + + /* first grab it again for owner 123 */ + ret = wait_any(fd, 1, &mutex_args.mutex, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + wait_args.owner = 456; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = put_mutex(fd, mutex_args.mutex, 123, &count); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, count); + + ret = pthread_tryjoin_np(thread, NULL); + EXPECT_EQ(EBUSY, ret); + + ret = put_mutex(fd, mutex_args.mutex, 123, &count); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, mutex_args.count); + check_mutex_state(fd, mutex_args.mutex, 1, 456); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(1, wait_args.index); + + /* test waking events */ + + event_args.manual = false; + event_args.signaled = false; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + objs[1] = event_args.event; + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 0); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(1, wait_args.index); + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 0); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(1, wait_args.index); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + event_args.manual = true; + event_args.signaled = false; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + objs[1] = event_args.event; + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 1, 1); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(1, wait_args.index); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, event_args.signaled); + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_PULSE_EVENT, &event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, event_args.signaled); + check_event_state(fd, event_args.event, 0, 1); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(1, wait_args.index); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + /* delete an 
object while it's being waited on */ + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 200); + wait_args.owner = 123; + objs[1] = mutex_args.mutex; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 200); + EXPECT_EQ(0, ret); + EXPECT_EQ(-1, thread_args.ret); + EXPECT_EQ(ETIMEDOUT, thread_args.err); + + close(fd); +} + +TEST(wake_all) +{ + struct winesync_event_args manual_event_args = {0}; + struct winesync_event_args auto_event_args = {0}; + struct winesync_mutex_args mutex_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + struct wait_args thread_args; + __u32 objs[4], count, index; + struct timespec timeout; + pthread_t thread; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 0; + sem_args.max = 3; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + + mutex_args.owner = 123; + mutex_args.count = 1; + mutex_args.mutex = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_MUTEX, &mutex_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, mutex_args.mutex); + + manual_event_args.manual = true; + manual_event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &manual_event_args); + EXPECT_EQ(0, ret); + + auto_event_args.manual = false; + auto_event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &auto_event_args); + EXPECT_EQ(0, ret); + + objs[0] = sem_args.sem; + objs[1] = mutex_args.mutex; + objs[2] = manual_event_args.event; + objs[3] = auto_event_args.event; + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + wait_args.timeout = (uintptr_t)&timeout; + wait_args.objs = (uintptr_t)objs; + wait_args.count = 4; + wait_args.owner = 456; + thread_args.fd = fd; + thread_args.args = &wait_args; + thread_args.request = WINESYNC_IOC_WAIT_ALL; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + sem_args.count = 1; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + + ret = pthread_tryjoin_np(thread, NULL); + EXPECT_EQ(EBUSY, ret); + + check_sem_state(fd, sem_args.sem, 1, 3); + + ret = wait_any(fd, 1, &sem_args.sem, 123, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + ret = put_mutex(fd, mutex_args.mutex, 123, &count); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, count); + + ret = pthread_tryjoin_np(thread, NULL); + EXPECT_EQ(EBUSY, ret); + + check_mutex_state(fd, mutex_args.mutex, 0, 0); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &manual_event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, manual_event_args.signaled); + + sem_args.count = 2; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, sem_args.count); + check_sem_state(fd, sem_args.sem, 2, 3); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &auto_event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, auto_event_args.signaled); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &manual_event_args); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, manual_event_args.signaled); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &auto_event_args); + 
EXPECT_EQ(0, ret); + EXPECT_EQ(0, auto_event_args.signaled); + + check_sem_state(fd, sem_args.sem, 1, 3); + check_mutex_state(fd, mutex_args.mutex, 1, 456); + check_event_state(fd, manual_event_args.event, 1, 1); + check_event_state(fd, auto_event_args.event, 0, 0); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + + /* delete an object while it's being waited on */ + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 200); + wait_args.owner = 123; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &mutex_args.mutex); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &manual_event_args.event); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &auto_event_args.event); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 200); + EXPECT_EQ(0, ret); + EXPECT_EQ(-1, thread_args.ret); + EXPECT_EQ(ETIMEDOUT, thread_args.err); + + close(fd); +} + +TEST(alert_any) +{ + struct winesync_event_args event_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + struct wait_args thread_args; + struct timespec timeout; + __u32 objs[2], index; + pthread_t thread; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 0; + sem_args.max = 2; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + objs[0] = sem_args.sem; + + sem_args.count = 1; + sem_args.max = 2; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + objs[1] = sem_args.sem; + + event_args.manual = true; + event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_any_alert(fd, 0, NULL, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_any_alert(fd, 0, NULL, 123, event_args.event, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_any_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(1, index); + + ret = wait_any_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, index); + + /* test wakeup via alert */ + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + wait_args.timeout = (uintptr_t)&timeout; + wait_args.objs = (uintptr_t)objs; + wait_args.count = 2; + wait_args.owner = 123; + wait_args.index = 0xdeadbeef; + wait_args.alert = event_args.event; + thread_args.fd = fd; + thread_args.args = &wait_args; + thread_args.request = WINESYNC_IOC_WAIT_ANY; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(2, wait_args.index); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, 
&event_args.event); + EXPECT_EQ(0, ret); + + /* test with an auto-reset event */ + + event_args.manual = false; + event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + sem_args.sem = objs[0]; + sem_args.count = 1; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + + ret = wait_any_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + ret = wait_any_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, index); + + ret = wait_any_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[0]); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[1]); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST(alert_all) +{ + struct winesync_event_args event_args = {0}; + struct winesync_wait_args wait_args = {0}; + struct winesync_sem_args sem_args = {0}; + struct wait_args thread_args; + struct timespec timeout; + __u32 objs[2], index; + pthread_t thread; + int fd, ret; + + fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY); + ASSERT_LE(0, fd); + + sem_args.count = 2; + sem_args.max = 2; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + objs[0] = sem_args.sem; + + sem_args.count = 1; + sem_args.max = 2; + sem_args.sem = 0xdeadbeef; + ret = ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args); + EXPECT_EQ(0, ret); + EXPECT_NE(0xdeadbeef, sem_args.sem); + objs[1] = sem_args.sem; + + event_args.manual = true; + event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_all_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + ret = wait_all_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, index); + + /* test wakeup via alert */ + + ret = ioctl(fd, WINESYNC_IOC_RESET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + get_abs_timeout(&timeout, CLOCK_MONOTONIC, 1000); + wait_args.timeout = (uintptr_t)&timeout; + wait_args.objs = (uintptr_t)objs; + wait_args.count = 2; + wait_args.owner = 123; + wait_args.index = 0xdeadbeef; + wait_args.alert = event_args.event; + thread_args.fd = fd; + thread_args.args = &wait_args; + thread_args.request = WINESYNC_IOC_WAIT_ALL; + ret = pthread_create(&thread, NULL, wait_thread, &thread_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(ETIMEDOUT, ret); + + ret = ioctl(fd, WINESYNC_IOC_SET_EVENT, &event_args); + EXPECT_EQ(0, ret); + + ret = wait_for_thread(thread, 100); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, thread_args.ret); + EXPECT_EQ(2, wait_args.index); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + /* test with an auto-reset event */ + + event_args.manual = false; + event_args.signaled = true; + ret = ioctl(fd, WINESYNC_IOC_CREATE_EVENT, &event_args); + EXPECT_EQ(0, ret); + + sem_args.sem = objs[1]; + sem_args.count = 2; + ret = ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args); + EXPECT_EQ(0, ret); + + ret = wait_all_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(0, index); + + ret = wait_all_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(0, ret); + EXPECT_EQ(2, index); + + ret = 
wait_all_alert(fd, 2, objs, 123, event_args.event, &index); + EXPECT_EQ(-1, ret); + EXPECT_EQ(ETIMEDOUT, errno); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &event_args.event); + EXPECT_EQ(0, ret); + + ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[0]); + EXPECT_EQ(0, ret); + ret = ioctl(fd, WINESYNC_IOC_DELETE, &objs[1]); + EXPECT_EQ(0, ret); + + close(fd); +} + +TEST_HARNESS_MAIN -- 2.36.0
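
For reference, a minimal user-space consumer of the interface exercised by the selftests above might look like the sketch below. It is illustrative only and not part of the patch; it assumes the patch has been applied and the uapi header <linux/winesync.h> is installed, and it reuses only the ioctls and structures shown above (the owner identifier 123 is an arbitrary choice, as in the tests).

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/winesync.h>

	int main(void)
	{
		struct winesync_sem_args sem_args = { .count = 1, .max = 2 };
		struct winesync_wait_args wait_args = {0};
		struct timespec timeout;
		int fd, ret;

		fd = open("/dev/winesync", O_CLOEXEC | O_RDONLY);
		if (fd < 0)
			return 1;

		/* Create a semaphore with an initial count of 1 and a maximum of 2. */
		if (ioctl(fd, WINESYNC_IOC_CREATE_SEM, &sem_args) < 0)
			return 1;

		/* Wait on it, using an absolute CLOCK_MONOTONIC timeout one second out. */
		clock_gettime(CLOCK_MONOTONIC, &timeout);
		timeout.tv_sec += 1;
		wait_args.timeout = (uintptr_t)&timeout;
		wait_args.objs = (uintptr_t)&sem_args.sem;
		wait_args.count = 1;
		wait_args.owner = 123;
		ret = ioctl(fd, WINESYNC_IOC_WAIT_ANY, &wait_args);
		printf("wait returned %d, index %u\n", ret, wait_args.index);

		/* Release one count back into the semaphore and clean up. */
		sem_args.count = 1;
		ioctl(fd, WINESYNC_IOC_PUT_SEM, &sem_args);
		ioctl(fd, WINESYNC_IOC_DELETE, &sem_args.sem);
		close(fd);
		return 0;
	}

Since the semaphore is created signaled, the wait here returns immediately with index 0; if the count were zero, the call would instead block until another thread released the semaphore or the one-second timeout expired.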