@@ -3179,7 +3179,8 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
char * * pinterp_name )
{
g_autofree struct elf_phdr * phdr = NULL ;
abi_ulong load_addr , load_bias , loaddr , hiaddr , error ;
abi_ulong load_addr , load_bias , loaddr , hiaddr , error , align ;
size_t reserve_size , align_size ;
int i , prot_exec ;
Error * err = NULL ;
@@ -3263,6 +3264,9 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
load_addr = loaddr ;
align = pow2ceil ( info - > alignment ) ;
info - > alignment = align ;
if ( pinterp_name ! = NULL ) {
if ( ehdr - > e_type = = ET_EXEC ) {
/*
@@ -3271,8 +3275,6 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
*/
probe_guest_base ( image_name , loaddr , hiaddr ) ;
} else {
abi_ulong align ;
/*
* The binary is dynamic , but we still need to
* select guest_base . In this case we pass a size .
@@ -3290,10 +3292,7 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
* Since we do not have complete control over the guest
* address space , we prefer the kernel to choose some address
* rather than force the use of LOAD_ADDR via MAP_FIXED .
* But without MAP_FIXED we cannot guarantee alignment ,
* only suggest it .
*/
align = pow2ceil ( info - > alignment ) ;
if ( align ) {
load_addr & = - align ;
}
@@ -3317,13 +3316,35 @@ static void load_elf_image(const char *image_name, const ImageSource *src,
* In both cases , we will overwrite pages in this range with mappings
* from the executable .
*/
load_addr = target_mmap ( load_addr , ( size_t ) hiaddr - loaddr + 1 , PROT_NONE ,
reserve_size = ( size_t ) hiaddr - loaddr + 1 ;
align_size = reserve_size ;
if ( ehdr - > e_type ! = ET_EXEC & & align > qemu_real_host_page_size ( ) ) {
align_size + = align - 1 ;
}
load_addr = target_mmap ( load_addr , align_size , PROT_NONE ,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
( ehdr - > e_type = = ET_EXEC ? MAP_FIXED_NOREPLACE : 0 ) ,
- 1 , 0 ) ;
if ( load_addr = = - 1 ) {
goto exit_mmap ;
}
if ( align_size ! = reserve_size ) {
abi_ulong align_addr = ROUND_UP ( load_addr , align ) ;
abi_ulong align_end = align_addr + reserve_size ;
abi_ulong load_end = load_addr + align_size ;
if ( align_addr ! = load_addr ) {
target_munmap ( load_addr , align_addr - load_addr ) ;
}
if ( align_end ! = load_end ) {
target_munmap ( align_end , load_end - align_end ) ;
}
load_addr = align_addr ;
}
load_bias = load_addr - loaddr ;
if ( elf_is_fdpic ( ehdr ) ) {