qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] 答复: [Qemu-block] [PATCH] scsi-disk: Fix crash if request is invalid or disk has no medium


From: John Snow
Subject: Re: [Qemu-devel] 答复: [Qemu-block] [PATCH] scsi-disk: Fix crash if request is invalid or disk has no medium
Date: Tue, 19 Mar 2019 12:34:59 -0400
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Thunderbird/60.4.0


On 3/18/19 9:52 PM, lizhengui wrote:
> This problem can be reproduced by repeatedly detaching and attaching a 
> remote cdrom.
> 

While a guest tries to access it, or not? do you have a scriptable test?

--js

> -----邮件原件-----
> 发件人: John Snow [mailto:address@hidden 
> 发送时间: 2019年3月19日 7:34
> 收件人: lizhengui; address@hidden; address@hidden; address@hidden
> 抄送: address@hidden; Fangyi (C); address@hidden; wangjie (P)
> 主题: Re: [Qemu-block] [PATCH] scsi-disk: Fix crash if request is invalid or 
> disk has no medium
> 
> 
> 
> On 3/7/19 3:57 AM, Zhengui li wrote:
>> From: Zhengui Li <address@hidden>
>>
>> Qemu will crash with the assertion error that "assert(r->req.aiocb !=
>> NULL)" in scsi_read_complete if the request is invalid or the disk has no medium.
>> The error is below:
>> qemu-kvm: hw/scsi/scsi_disk.c:299: scsi_read_complete: Assertion
>> `r->req.aiocb != NULL' failed.
>>
>> This patch adds a function scsi_read_complete_noio to fix it.
>>
> 
> How do you reproduce this? Is there a test?
> 
> --js
> 
>> Signed-off-by: Zhengui Li <address@hidden>
>> ---
>>  hw/scsi/scsi-disk.c | 37 ++++++++++++++++++++++++-------------
>>  1 file changed, 24 insertions(+), 13 deletions(-)
>>
>> diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
>> index d4e83ae..624df3c 100644
>> --- a/hw/scsi/scsi-disk.c
>> +++ b/hw/scsi/scsi-disk.c
>> @@ -296,22 +296,15 @@ static void scsi_dma_complete(void *opaque, int ret)
>>      aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
>>  }
>>  
>> -static void scsi_read_complete(void * opaque, int ret)
>> +static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
>>  {
>> -    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
>> -    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
>> -    int n;
>> +    uint32_t n;
>>  
>> -    assert(r->req.aiocb != NULL);
>> -    r->req.aiocb = NULL;
>> -    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
>> -    if (scsi_disk_req_check_error(r, ret, true)) {
>> +    assert (r->req.aiocb == NULL);
>> +    if (scsi_disk_req_check_error(r, ret, false)) {
>>          goto done;
>>      }
>>  
>> -    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
>> -    trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
>> -
>>      n = r->qiov.size / 512;
>>      r->sector += n;
>>      r->sector_count -= n;
>> @@ -319,6 +312,24 @@ static void scsi_read_complete(void * opaque, int ret)
>>  
>>  done:
>>      scsi_req_unref(&r->req);
>> +}
>> +
>> +static void scsi_read_complete(void * opaque, int ret)
>> +{
>> +    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
>> +    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
>> +
>> +    assert(r->req.aiocb != NULL);
>> +    r->req.aiocb = NULL;
>> +
>> +    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
>> +    if (ret < 0) {
>> +        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
>> +    } else {
>> +        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
>> +        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
>> +    }
>> +    scsi_read_complete_noio(r, ret);
>>      aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
>>  }
>>  
>> @@ -395,12 +406,12 @@ static void scsi_read_data(SCSIRequest *req)
>>      scsi_req_ref(&r->req);
>>      if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
>>          trace_scsi_disk_read_data_invalid();
>> -        scsi_read_complete(r, -EINVAL);
>> +        scsi_read_complete_noio(r, -EINVAL);
>>          return;
>>      }
>>  
>>      if (!blk_is_available(req->dev->conf.blk)) {
>> -        scsi_read_complete(r, -ENOMEDIUM);
>> +        scsi_read_complete_noio(r, -ENOMEDIUM);
>>          return;
>>      }
>>  
>>



reply via email to

[Prev in Thread] Current Thread [Next in Thread]