qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [PATCH 03/12] rcu: add rcu library


From: liu ping fan
Subject: Re: [Qemu-devel] [PATCH 03/12] rcu: add rcu library
Date: Fri, 17 May 2013 15:08:11 +0800

[...]
>> +
>> +void synchronize_rcu(void)
>> +{
>> +    unsigned long was_online;
>> +
>> +    was_online = get_rcu_reader()->ctr;
>> +
>> +    /* Mark the writer thread offline to make sure we don't wait for
>> +     * our own quiescent state. This allows using synchronize_rcu()
>> +     * in threads registered as readers.
>> +     *
>> +     * rcu_thread_offline() and rcu_thread_online() include a
>> +     * memory barrier.
>> +     */
>> +    if (was_online) {
>> +        rcu_thread_offline();
>
> Encourage the user to call synchronize_rcu() in a reader? I think the
> caller should ensure it is outside the read-side critical section. Also,
> online can be nested, which makes the situation even worse.
>
What about removing call_rcu_thread from @registry? Then we can avoid
this, with some small changes in _offline()/_online().

> Regards,
> Pingfan
>> +    } else {
>> +        smp_mb();
>> +    }
>> +
>> +    qemu_mutex_lock(&rcu_gp_lock);
>> +
>> +    if (!QLIST_EMPTY(&registry)) {
>> +        if (sizeof(rcu_gp_ctr) < 8) {
>> +            /* For architectures with 32-bit longs, a two-subphases 
>> algorithm
>> +             * ensures we do not encounter overflow bugs.
>> +             *
>> +             * Switch parity: 0 -> 1, 1 -> 0.
>> +             */
>> +            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
>> +            wait_for_readers();
>> +            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
>> +        } else {
>> +            /* Increment current grace period.  */
>> +            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
>> +        }
>> +
>> +        wait_for_readers();
>> +    }
>> +
>> +    qemu_mutex_unlock(&rcu_gp_lock);
>> +
>> +    if (was_online) {
>> +        rcu_thread_online();
>> +    } else {
>> +        smp_mb();
>> +    }
>> +}
>> +
>> +void rcu_register_thread(void)
>> +{
>> +    if (!get_rcu_reader()) {
>> +        alloc_rcu_reader();
>> +    }
>> +
>> +    assert(get_rcu_reader()->ctr == 0);
>> +    qemu_mutex_lock(&rcu_gp_lock);
>> +    QLIST_INSERT_HEAD(&registry, get_rcu_reader(), node);
>> +    qemu_mutex_unlock(&rcu_gp_lock);
>> +    rcu_quiescent_state();
>> +}
>> +
>> +void rcu_unregister_thread(void)
>> +{
>> +    rcu_thread_offline();
>> +    qemu_mutex_lock(&rcu_gp_lock);
>> +    QLIST_REMOVE(get_rcu_reader(), node);
>> +    qemu_mutex_unlock(&rcu_gp_lock);
>> +}
>> +
>> +static void __attribute__((__constructor__)) rcu_init(void)
>> +{
>> +    qemu_mutex_init(&rcu_gp_lock);
>> +    qemu_event_init(&rcu_gp_event, true);
>> +    rcu_register_thread();
>> +}
>> --
>> 1.8.1.4
>>
>>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]