@@ -403,7 +403,7 @@ static void virtio_gpu_resource_destroy(VirtIOGPU *g,
}
}
pixman_image_unref ( res - > image ) ;
qemu_pixman_image_unref(res->image);
virtio_gpu_cleanup_mapping ( g , res ) ;
QTAILQ_REMOVE ( & g - > reslist , res , next ) ;
g - > hostmem - = res - > hostmem ;
@@ -492,6 +492,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
{
struct virtio_gpu_simple_resource * res ;
struct virtio_gpu_resource_flush rf ;
struct virtio_gpu_scanout * scanout ;
pixman_region16_t flush_region ;
int i ;
@@ -502,16 +503,29 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
res = virtio_gpu_find_check_resource ( g , rf . resource_id , false ,
__func__ , & cmd - > error ) ;
if ( ! res | | res - > blob ) {
if ( ! res ) {
return ;
}
if ( rf . r . x > res - > width | |
if ( res - > blob ) {
for ( i = 0 ; i < g - > parent_obj . conf . max_outputs ; i + + ) {
scanout = & g - > parent_obj . scanout [ i ] ;
if ( scanout - > resource_id = = res - > resource_id & &
console_has_gl ( scanout - > con ) ) {
dpy_gl_update ( scanout - > con , 0 , 0 , scanout - > width ,
scanout - > height ) ;
return ;
}
}
}
if ( ! res - > blob & &
( rf . r . x > res - > width | |
rf . r . y > res - > height | |
rf . r . width > res - > width | |
rf . r . height > res - > height | |
rf . r . x + rf . r . width > res - > width | |
rf . r . y + rf . r . height > res - > height ) {
rf . r . y + rf . r . height > res - > height ) ) {
qemu_log_mask ( LOG_GUEST_ERROR , " %s: flush bounds outside resource "
" bounds for resource %d: %d %d %d %d vs %d %d \n " ,
__func__ , rf . resource_id , rf . r . x , rf . r . y ,
@@ -523,7 +537,6 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
pixman_region_init_rect ( & flush_region ,
rf . r . x , rf . r . y , rf . r . width , rf . r . height ) ;
for ( i = 0 ; i < g - > parent_obj . conf . max_outputs ; i + + ) {
struct virtio_gpu_scanout * scanout ;
pixman_region16_t region , finalregion ;
pixman_box16_t * extents ;
@@ -614,10 +627,23 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
}
g - > parent_obj . enable = 1 ;
data = ( uint8_t * ) pixman_image_get_data ( res - > image ) ;
if ( res - > blob ) {
if ( console_has_gl ( scanout - > con ) ) {
if ( ! virtio_gpu_update_dmabuf ( g , scanout_id , res , fb ) ) {
virtio_gpu_update_scanout ( g , scanout_id , res , r ) ;
return ;
}
}
data = res - > blob ;
} else {
data = ( uint8_t * ) pixman_image_get_data ( res - > image ) ;
}
/* create a surface for this scanout */
if ( ! scanout - > ds | |
if ( ( res - > blob & & ! console_has_gl ( scanout - > con ) ) | |
! scanout - > ds | |
surface_data ( scanout - > ds ) ! = data + fb - > offset | |
scanout - > width ! = r - > width | |
scanout - > height ! = r - > height ) {
@@ -681,6 +707,61 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
& fb , res , & ss . r , & cmd - > error ) ;
}
/*
 * Handle VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: attach a blob resource to a
 * scanout (display head).
 *
 * Validates the guest-supplied pixel format and framebuffer geometry
 * against the blob's size before delegating to
 * virtio_gpu_do_set_scanout().  On any validation failure cmd->error is
 * set to VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER and the scanout is left
 * untouched.  A resource_id of 0 disables the scanout instead.
 */
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* require_backing=true: a scanout blob must have guest memory attached */
    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    /*
     * Compute the end of the requested rectangle within the blob.  The
     * guest-supplied stride/width/height fields are 32-bit; force the
     * products into 64-bit arithmetic so a malicious guest cannot wrap
     * the intermediate values and slip past the blob_size check below.
     * (A zero r.height also wraps to a huge 64-bit value here and is
     * therefore rejected rather than underflowing silently.)
     */
    fbend = fb.offset;
    fbend += (uint64_t)fb.stride * (ss.r.height - 1);
    fbend += (uint64_t)fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
int virtio_gpu_create_mapping_iov ( VirtIOGPU * g ,
uint32_t nr_entries , uint32_t offset ,
struct virtio_gpu_ctrl_command * cmd ,
@@ -875,6 +956,13 @@ void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
case VIRTIO_GPU_CMD_SET_SCANOUT :
virtio_gpu_set_scanout ( g , cmd ) ;
break ;
case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB :
if ( ! virtio_gpu_blob_enabled ( g - > parent_obj . conf ) ) {
cmd - > error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER ;
break ;
}
virtio_gpu_set_scanout_blob ( g , cmd ) ;
break ;
case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING :
virtio_gpu_resource_attach_backing ( g , cmd ) ;
break ;