39
39
#include "hw/hw.h"
40
40
#include "hw/qdev.h"
41
41
#include "hw/xen/xen.h"
42
+ #include "qemu/bitmap.h"
42
43
#include "qemu/osdep.h"
43
44
#include "qemu/tls.h"
44
45
#include "sysemu/kvm.h"
49
50
#if defined(CONFIG_USER_ONLY )
50
51
#include <qemu.h>
51
52
#endif
53
+ #include "translate-all.h"
52
54
53
55
//#define DEBUG_SUBPAGE
54
56
@@ -532,41 +534,28 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
532
534
return block ;
533
535
}
534
536
535
- /* Note: start and end must be within the same ram block. */
536
- void cpu_physical_memory_reset_dirty (ram_addr_t start , ram_addr_t end ,
537
- int dirty_flags )
537
+ static void tlb_reset_dirty_range_all (ram_addr_t start , ram_addr_t length )
538
538
{
539
- unsigned long length , start1 ;
540
- int i ;
539
+ ram_addr_t end = TARGET_PAGE_ALIGN (start + length );
541
540
542
541
start &= TARGET_PAGE_MASK ;
543
- end = TARGET_PAGE_ALIGN (end );
544
542
545
- length = end - start ;
543
+ RAMBlock * block = qemu_get_ram_block (start );
544
+ assert (block == qemu_get_ram_block (end - 1 ));
545
+ uintptr_t start1 = (uintptr_t )block -> host + (start - block -> offset );
546
+ cpu_tlb_reset_dirty_all (start1 , length );
547
+ }
548
+
549
+ /* Note: start and end must be within the same ram block. */
550
+ void cpu_physical_memory_reset_dirty (ram_addr_t start , ram_addr_t length ,
551
+ unsigned client )
552
+ {
546
553
if (length == 0 )
547
554
return ;
548
- cpu_physical_memory_mask_dirty_range (start , length , dirty_flags );
549
-
550
- /* we modify the TLB cache so that the dirty bit will be set again
551
- when accessing the range */
552
- start1 = (unsigned long )qemu_safe_ram_ptr (start );
553
- /* Chek that we don't span multiple blocks - this breaks the
554
- address comparisons below. */
555
- if ((unsigned long )qemu_safe_ram_ptr (end - 1 ) - start1
556
- != (end - 1 ) - start ) {
557
- abort ();
558
- }
555
+ cpu_physical_memory_clear_dirty_range (start , length , client );
559
556
560
- CPUState * cpu ;
561
- CPU_FOREACH (cpu ) {
562
- int mmu_idx ;
563
- for (mmu_idx = 0 ; mmu_idx < NB_MMU_MODES ; mmu_idx ++ ) {
564
- for (i = 0 ; i < CPU_TLB_SIZE ; i ++ ) {
565
- CPUArchState * env = cpu -> env_ptr ;
566
- tlb_reset_dirty_range (& env -> tlb_table [mmu_idx ][i ],
567
- start1 , length );
568
- }
569
- }
557
+ if (tcg_enabled ()) {
558
+ tlb_reset_dirty_range_all (start , length );
570
559
}
571
560
}
572
561
@@ -603,7 +592,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
603
592
p = (void * )(unsigned long )((tlb_entry -> addr_write & TARGET_PAGE_MASK )
604
593
+ tlb_entry -> addend );
605
594
ram_addr = qemu_ram_addr_from_host_nofail (p );
606
- if (! cpu_physical_memory_is_dirty (ram_addr )) {
595
+ if (cpu_physical_memory_is_clean (ram_addr )) {
607
596
tlb_entry -> addr_write |= TLB_NOTDIRTY ;
608
597
}
609
598
}
@@ -1079,6 +1068,9 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
1079
1068
ram_addr_t size , void * host )
1080
1069
{
1081
1070
RAMBlock * block , * new_block ;
1071
+ ram_addr_t old_ram_size , new_ram_size ;
1072
+
1073
+ old_ram_size = last_ram_offset () >> TARGET_PAGE_BITS ;
1082
1074
1083
1075
size = TARGET_PAGE_ALIGN (size );
1084
1076
new_block = g_malloc0 (sizeof (* new_block ));
@@ -1166,11 +1158,17 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
1166
1158
ram_list .version ++ ;
1167
1159
qemu_mutex_unlock_ramlist ();
1168
1160
1169
- ram_list .phys_dirty = g_realloc (ram_list .phys_dirty ,
1170
- last_ram_offset () >> TARGET_PAGE_BITS );
1171
- memset (ram_list .phys_dirty + (new_block -> offset >> TARGET_PAGE_BITS ),
1172
- 0xff , size >> TARGET_PAGE_BITS );
1173
- //cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1161
+ new_ram_size = last_ram_offset () >> TARGET_PAGE_BITS ;
1162
+
1163
+ if (new_ram_size > old_ram_size ) {
1164
+ int i ;
1165
+ for (i = 0 ; i < DIRTY_MEMORY_NUM ; i ++ ) {
1166
+ ram_list .dirty_memory [i ] =
1167
+ bitmap_zero_extend (ram_list .dirty_memory [i ],
1168
+ old_ram_size , new_ram_size );
1169
+ }
1170
+ }
1171
+ cpu_physical_memory_set_dirty_range (new_block -> offset , size );
1174
1172
1175
1173
qemu_ram_setup_dump (new_block -> host , size );
1176
1174
//qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
@@ -1463,61 +1461,52 @@ static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
1463
1461
/* Byte store into RAM that is being watched for code modification.
 *
 * Invalidates any translated code covering the byte, performs the
 * store, marks the page dirty for the MIGRATION and VGA clients, and
 * drops the notdirty TLB trap once no client still sees the page as
 * clean.
 */
static void notdirty_mem_writeb(void *opaque, hwaddr ram_addr,
                                uint32_t val)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast0(ram_addr, 1);
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
1482
1477
1483
1478
/* 16-bit store into RAM that is being watched for code modification.
 *
 * Invalidates any translated code covering the two bytes written,
 * performs the store, marks the page dirty for the MIGRATION and VGA
 * clients, and drops the notdirty TLB trap once no client still sees
 * the page as clean.
 */
static void notdirty_mem_writew(void *opaque, hwaddr ram_addr,
                                uint32_t val)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        /* Invalidate the full 2-byte access width, not just 1 byte,
         * or a TB overlapping only the second byte would survive. */
        tb_invalidate_phys_page_fast0(ram_addr, 2);
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
1502
1494
1503
1495
/* 32-bit store into RAM that is being watched for code modification.
 *
 * Invalidates any translated code covering the four bytes written,
 * performs the store, marks the page dirty for the MIGRATION and VGA
 * clients, and drops the notdirty TLB trap once no client still sees
 * the page as clean.
 */
static void notdirty_mem_writel(void *opaque, hwaddr ram_addr,
                                uint32_t val)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        /* Invalidate the full 4-byte access width, not just 1 byte,
         * or TBs overlapping the upper bytes would survive. */
        tb_invalidate_phys_page_fast0(ram_addr, 4);
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}
1522
1511
1523
1512
static CPUReadMemoryFunc * const error_mem_read [3 ] = {
@@ -1532,16 +1521,6 @@ static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1532
1521
notdirty_mem_writel ,
1533
1522
};
1534
1523
1535
- static void tb_check_watchpoint (CPUArchState * env )
1536
- {
1537
- TranslationBlock * tb = tb_find_pc (env -> mem_io_pc );
1538
- if (!tb ) {
1539
- cpu_abort (env , "check_watchpoint: could not find TB for "
1540
- "pc=%p" , (void * )env -> mem_io_pc );
1541
- }
1542
- cpu_restore_state (env , env -> mem_io_pc );
1543
- tb_phys_invalidate (tb , -1 );
1544
- }
1545
1524
1546
1525
/* Generate a debug exception if a watchpoint has been hit. */
1547
1526
static void check_watchpoint (int offset , int len_mask , int flags )
@@ -1919,11 +1898,12 @@ void cpu_physical_memory_rw(hwaddr addr, void *buf,
1919
1898
/* After a DMA-style write into guest RAM, invalidate any translated
 * code in the written range and mark it dirty for the VGA and
 * MIGRATION clients.  Pages already dirty for every client need no
 * work, since no cached state depends on them being clean.
 */
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
}
1929
1909
@@ -2435,12 +2415,13 @@ void stl_phys_notdirty(hwaddr addr, uint32_t val)
2435
2415
stl_p (ptr , val );
2436
2416
2437
2417
if (unlikely (in_migration )) {
2438
- if (! cpu_physical_memory_is_dirty (addr1 )) {
2418
+ if (cpu_physical_memory_is_clean (addr1 )) {
2439
2419
/* invalidate code */
2440
2420
tb_invalidate_phys_page_range (addr1 , addr1 + 4 , 0 );
2441
2421
/* set dirty bit */
2442
- cpu_physical_memory_set_dirty_flags (
2443
- addr1 , (0xff & ~CODE_DIRTY_FLAG ));
2422
+ cpu_physical_memory_set_dirty_flag (addr1 ,
2423
+ DIRTY_MEMORY_MIGRATION );
2424
+ cpu_physical_memory_set_dirty_flag (addr1 , DIRTY_MEMORY_VGA );
2444
2425
}
2445
2426
}
2446
2427
}
@@ -2596,12 +2577,12 @@ static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2596
2577
stw_p (ptr , val );
2597
2578
break ;
2598
2579
}
2599
- if (! cpu_physical_memory_is_dirty (addr1 )) {
2580
+ if (cpu_physical_memory_is_clean (addr1 )) {
2600
2581
/* invalidate code */
2601
2582
tb_invalidate_phys_page_range (addr1 , addr1 + 2 , 0 );
2602
2583
/* set dirty bit */
2603
- cpu_physical_memory_set_dirty_flags (addr1 ,
2604
- ( 0xff & ~ CODE_DIRTY_FLAG ) );
2584
+ cpu_physical_memory_set_dirty_flag (addr1 , DIRTY_MEMORY_MIGRATION );
2585
+ cpu_physical_memory_set_dirty_flag ( addr1 , DIRTY_MEMORY_VGA );
2605
2586
}
2606
2587
}
2607
2588
}
0 commit comments