@@ -74,6 +74,7 @@ struct shared_vmport_iopage {
} ;
typedef struct shared_vmport_iopage shared_vmport_iopage_t ;
# endif
static shared_vmport_iopage_t * shared_vmport_page ;
static inline uint32_t xen_vcpu_eport ( shared_iopage_t * shared_page , int i )
{
@@ -96,6 +97,11 @@ typedef struct XenPhysmap {
} XenPhysmap ;
static QLIST_HEAD ( , XenPhysmap ) xen_physmap ;
static const XenPhysmap * log_for_dirtybit ;
/* Buffer used by xen_sync_dirty_bitmap */
static unsigned long * dirty_bitmap ;
static Notifier suspend ;
static Notifier wakeup ;
typedef struct XenPciDevice {
PCIDevice * pci_dev ;
@@ -106,7 +112,6 @@ typedef struct XenPciDevice {
typedef struct XenIOState {
ioservid_t ioservid ;
shared_iopage_t * shared_page ;
shared_vmport_iopage_t * shared_vmport_page ;
buffered_iopage_t * buffered_io_page ;
xenforeignmemory_resource_handle * fres ;
QEMUTimer * buffered_io_timer ;
@@ -126,14 +131,8 @@ typedef struct XenIOState {
MemoryListener io_listener ;
QLIST_HEAD ( , XenPciDevice ) dev_list ;
DeviceListener device_listener ;
hwaddr free_phys_offset ;
const XenPhysmap * log_for_dirtybit ;
/* Buffer used by xen_sync_dirty_bitmap */
unsigned long * dirty_bitmap ;
Notifier exit ;
Notifier suspend ;
Notifier wakeup ;
} XenIOState ;
/* Xen specific function for piix pci */
@@ -463,10 +462,10 @@ static int xen_remove_from_physmap(XenIOState *state,
}
QLIST_REMOVE ( physmap , list ) ;
if ( state - > log_for_dirtybit = = physmap ) {
state - > log_for_dirtybit = NULL ;
g_free ( state - > dirty_bitmap ) ;
state - > dirty_bitmap = NULL ;
if ( log_for_dirtybit = = physmap ) {
log_for_dirtybit = NULL ;
g_free ( dirty_bitmap ) ;
dirty_bitmap = NULL ;
}
g_free ( physmap ) ;
@@ -627,16 +626,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
return ;
}
if ( state - > log_for_dirtybit = = NULL ) {
state - > log_for_dirtybit = physmap ;
state - > dirty_bitmap = g_new ( unsigned long , bitmap_size ) ;
} else if ( state - > log_for_dirtybit ! = physmap ) {
if ( log_for_dirtybit = = NULL ) {
log_for_dirtybit = physmap ;
dirty_bitmap = g_new ( unsigned long , bitmap_size ) ;
} else if ( log_for_dirtybit ! = physmap ) {
/* Only one range for dirty bitmap can be tracked. */
return ;
}
rc = xen_track_dirty_vram ( xen_domid , start_addr > > TARGET_PAGE_BITS ,
npages , state - > dirty_bitmap ) ;
npages , dirty_bitmap ) ;
if ( rc < 0 ) {
# ifndef ENODATA
# define ENODATA ENOENT
@@ -651,7 +650,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
}
for ( i = 0 ; i < bitmap_size ; i + + ) {
unsigned long map = state - > dirty_bitmap [ i ] ;
unsigned long map = dirty_bitmap [ i ] ;
while ( map ! = 0 ) {
j = ctzl ( map ) ;
map & = ~ ( 1ul < < j ) ;
@@ -677,12 +676,10 @@ static void xen_log_start(MemoryListener *listener,
static void xen_log_stop ( MemoryListener * listener , MemoryRegionSection * section ,
int old , int new )
{
XenIOState * state = container_of ( listener , XenIOState , memory_listener ) ;
if ( old & ~ new & ( 1 < < DIRTY_MEMORY_VGA ) ) {
state - > log_for_dirtybit = NULL ;
g_free ( state - > dirty_bitmap ) ;
state - > dirty_bitmap = NULL ;
log_for_dirtybit = NULL ;
g_free ( dirty_bitmap ) ;
dirty_bitmap = NULL ;
/* Disable dirty bit tracking */
xen_track_dirty_vram ( xen_domid , 0 , 0 , NULL ) ;
}
@@ -1022,9 +1019,9 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
vmware_regs_t * vmport_regs ;
assert ( state - > shared_vmport_page ) ;
assert ( shared_vmport_page ) ;
vmport_regs =
& state - > shared_vmport_page - > vcpu_vmport_regs [ state - > send_vcpu ] ;
& shared_vmport_page - > vcpu_vmport_regs [ state - > send_vcpu ] ;
QEMU_BUILD_BUG_ON ( sizeof ( * req ) < sizeof ( * vmport_regs ) ) ;
current_cpu = state - > cpu_by_vcpu_id [ state - > send_vcpu ] ;
@@ -1472,7 +1469,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
state - > memory_listener = xen_memory_listener ;
memory_listener_register ( & state - > memory_listener , & address_space_memory ) ;
state - > log_for_dirtybit = NULL ;
state - > io_listener = xen_io_listener ;
memory_listener_register ( & state - > io_listener , & address_space_io ) ;
@@ -1487,19 +1483,19 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
QLIST_INIT ( & xen_physmap ) ;
xen_read_physmap ( state ) ;
state - > suspend . notify = xen_suspend_notifier ;
qemu_register_suspend_notifier ( & state - > suspend ) ;
suspend . notify = xen_suspend_notifier ;
qemu_register_suspend_notifier ( & suspend ) ;
state - > wakeup . notify = xen_wakeup_notifier ;
qemu_register_wakeup_notifier ( & state - > wakeup ) ;
wakeup . notify = xen_wakeup_notifier ;
qemu_register_wakeup_notifier ( & wakeup ) ;
rc = xen_get_vmport_regs_pfn ( xen_xc , xen_domid , & ioreq_pfn ) ;
if ( ! rc ) {
DPRINTF ( " shared vmport page at pfn %lx \n " , ioreq_pfn ) ;
state - > shared_vmport_page =
shared_vmport_page =
xenforeignmemory_map ( xen_fmem , xen_domid , PROT_READ | PROT_WRITE ,
1 , & ioreq_pfn , NULL ) ;
if ( state - > shared_vmport_page = = NULL ) {
if ( shared_vmport_page = = NULL ) {
error_report ( " map shared vmport IO page returned error %d handle=%p " ,
errno , xen_xc ) ;
goto err ;