From 1d3770757b6a159dd132ddf642812da927f9bcd8 Mon Sep 17 00:00:00 2001
From: Chris Moeller
Date: Sat, 15 Feb 2014 23:38:46 -0800
Subject: [PATCH] Updated LazyUSF library

---
 Frameworks/lazyusf/lazyusf/audio.c          |  2 +-
 Frameworks/lazyusf/lazyusf/cpu.h            |  2 +
 Frameworks/lazyusf/lazyusf/main.c           |  1 -
 Frameworks/lazyusf/lazyusf/memory.c         | 47 +++++++++++++++++----
 Frameworks/lazyusf/lazyusf/rsp/rsp.c        |  1 -
 Frameworks/lazyusf/lazyusf/rsp/su.h         | 16 +++----
 Frameworks/lazyusf/lazyusf/rsp/vu/clamp.h   | 20 ++++-----
 Frameworks/lazyusf/lazyusf/rsp/vu/shuffle.h |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vabs.h    |  8 ++--
 Frameworks/lazyusf/lazyusf/rsp/vu/vadd.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vaddc.h   |  4 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vand.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vch.h     | 10 ++---
 Frameworks/lazyusf/lazyusf/rsp/vu/vcl.h     | 12 +++---
 Frameworks/lazyusf/lazyusf/rsp/vu/vcr.h     |  8 ++--
 Frameworks/lazyusf/lazyusf/rsp/vu/veq.h     |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vge.h     |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vlt.h     |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vmacf.h   |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vmacq.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmacu.h   |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vmadh.h   |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vmadl.h   |  6 +--
 Frameworks/lazyusf/lazyusf/rsp/vu/vmadm.h   |  4 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmadn.h   |  4 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmrg.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmudh.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmudl.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmudm.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmudn.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmulf.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vmulu.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vnand.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vne.h     |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vnop.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vnor.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vnxor.h   |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vor.h     |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vsaw.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vsub.h    |  2 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vsubc.h   |  4 +-
 Frameworks/lazyusf/lazyusf/rsp/vu/vxor.h    |  2 +-
 Frameworks/lazyusf/lazyusf/tlb.c            |  4 +-
 Frameworks/lazyusf/lazyusf/usf.c            | 14 +++---
 Frameworks/lazyusf/lazyusf/usf.h            |  6 +--
 Frameworks/lazyusf/lazyusf/usf_internal.h   |  6 +--
 46 files changed, 140 insertions(+), 111 deletions(-)

diff --git a/Frameworks/lazyusf/lazyusf/audio.c b/Frameworks/lazyusf/lazyusf/audio.c
index 1b8d0b56a..c2d9bc842 100644
--- a/Frameworks/lazyusf/lazyusf/audio.c
+++ b/Frameworks/lazyusf/lazyusf/audio.c
@@ -7,7 +7,7 @@
 #include "usf_internal.h"
 void AddBuffer(usf_state_t *state, unsigned char *buf, unsigned int length)
 {
-    int32_t i, do_max;
+    unsigned int i, do_max;
     int16_t * sample_buffer = state->sample_buffer;
     if(!state->cpu_running)
diff --git a/Frameworks/lazyusf/lazyusf/cpu.h b/Frameworks/lazyusf/lazyusf/cpu.h
index 0d7052df6..df328580e 100644
--- a/Frameworks/lazyusf/lazyusf/cpu.h
+++ b/Frameworks/lazyusf/lazyusf/cpu.h
@@ -90,4 +90,6 @@ enum SaveType {
     FlashRam
 };
+void StartEmulationFromSave ( usf_state_t * state, void * savestate );
+
 #endif
diff --git a/Frameworks/lazyusf/lazyusf/main.c b/Frameworks/lazyusf/lazyusf/main.c
index ba840fe2d..222a71b3a 100644
--- a/Frameworks/lazyusf/lazyusf/main.c
+++ b/Frameworks/lazyusf/lazyusf/main.c
@@ -3,7 +3,6 @@
 #include 
 #include 
 #include 
-#include 
 #include "usf.h"
 #include "cpu.h"
 #include "memory.h"
diff --git a/Frameworks/lazyusf/lazyusf/memory.c b/Frameworks/lazyusf/lazyusf/memory.c
index df63f6083..72256324f 100644
--- a/Frameworks/lazyusf/lazyusf/memory.c
+++ b/Frameworks/lazyusf/lazyusf/memory.c
@@ -25,9 +25,7 @@
  */
 #include 
 #include 
-#include 
 #include 
-#include 
 #include "usf.h"
 #include "main.h"
@@ -41,6 +39,39 @@ uint8_t * PageROM(usf_state_t * state, uint32_t addr) {
     return (state->ROMPages[addr/0x10000])?state->ROMPages[addr/0x10000]+(addr%0x10000):&state->EmptySpace;
 }
+void * large_alloc(size_t);
+void large_free(void *, size_t);
+
+#ifdef _WIN32
+#include 
+
+void * large_alloc(size_t size)
+{
+    return VirtualAlloc( NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE );
+}
+
+void large_free(void * p, size_t size)
+{
+    VirtualFree( p, size, MEM_RELEASE );
+}
+#else
+#include 
+
+#ifdef __APPLE__
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+void * large_alloc(size_t size)
+{
+    return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+}
+
+void large_free(void * p, size_t size)
+{
+    munmap( p, size );
+}
+#endif
+
 int32_t Allocate_Memory ( void * state ) {
     uint32_t i = 0;
     //RdramSize = 0x800000;
@@ -51,7 +82,7 @@ int32_t Allocate_Memory ( void * state ) {
     // the mmap technique works craptacular when the regions don't overlay
-    USF_STATE->MemChunk = mmap(NULL, 0x100000 * sizeof(uintptr_t) + 0x1D000 + USF_STATE->RdramSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
+    USF_STATE->MemChunk = (uint8_t *) large_alloc( 0x100000 * sizeof(uintptr_t) + 0x1D000 + USF_STATE->RdramSize );
     USF_STATE->TLB_Map = (uintptr_t*)USF_STATE->MemChunk;
     if (USF_STATE->TLB_Map == NULL) {
@@ -60,7 +91,7 @@ int32_t Allocate_Memory ( void * state ) {
     memset(USF_STATE->TLB_Map, 0, 0x100000 * sizeof(uintptr_t) + 0x10000);
-    USF_STATE->N64MEM = mmap((uintptr_t)USF_STATE->MemChunk + 0x100000 * sizeof(uintptr_t) + 0x10000, 0xD000 + USF_STATE->RdramSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, 0, 0);
+    USF_STATE->N64MEM = USF_STATE->MemChunk + 0x100000 * sizeof(uintptr_t) + 0x10000;
     if(USF_STATE->N64MEM == NULL) {
         DisplayError("Failed to allocate N64MEM");
         return 0;
@@ -68,7 +99,7 @@ int32_t Allocate_Memory ( void * state ) {
     //memset(state->N64MEM, 0, state->RdramSize);
-    USF_STATE->NOMEM = mmap((uintptr_t)USF_STATE->N64MEM + USF_STATE->RdramSize, 0xD000, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, 0, 0);
+    USF_STATE->NOMEM = USF_STATE->N64MEM + USF_STATE->RdramSize;
     if(USF_STATE->RdramSize == 0x400000)
     {
@@ -98,7 +129,7 @@ int PreAllocate_Memory(usf_state_t * state) {
     int i = 0;
     // Moved the savestate allocation here :) (for better management later)
-    state->savestatespace = malloc(0x80275C);
+    state->savestatespace = (uint8_t *) malloc(0x80275C);
     if(state->savestatespace == 0)
         return 0;
@@ -124,9 +155,7 @@ void Release_Memory ( usf_state_t * state ) {
     state->MemoryState = 0;
-    if (state->MemChunk != 0) {munmap(state->MemChunk, 0x100000 * sizeof(uintptr_t) + 0x10000); state->MemChunk=0;}
-    if (state->N64MEM != 0) {munmap(state->N64MEM, state->RdramSize); state->N64MEM=0;}
-    if (state->NOMEM != 0) {munmap(state->NOMEM, 0xD000); state->NOMEM=0;}
+    if (state->MemChunk != 0) { large_free( state->MemChunk, 0x100000 * sizeof(uintptr_t) + 0x1D000 + USF_STATE->RdramSize ); state->MemChunk=0; }
     if(state->savestatespace)
         free(state->savestatespace);
diff --git a/Frameworks/lazyusf/lazyusf/rsp/rsp.c b/Frameworks/lazyusf/lazyusf/rsp/rsp.c
index 41c2d6f51..4e68de4a9 100644
--- a/Frameworks/lazyusf/lazyusf/rsp/rsp.c
+++ b/Frameworks/lazyusf/lazyusf/rsp/rsp.c
@@ -15,7 +15,6
@@ #include #include #include -#include #include #include "../usf.h" diff --git a/Frameworks/lazyusf/lazyusf/rsp/su.h b/Frameworks/lazyusf/lazyusf/rsp/su.h index 5a80747ca..2a0a810c1 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/su.h +++ b/Frameworks/lazyusf/lazyusf/rsp/su.h @@ -219,10 +219,10 @@ static void MT_CMD_CLOCK(usf_state_t * state, int rt) } static void MT_READ_ONLY(usf_state_t * state, int rt) { - char text[64]; + //char text[64]; - sprintf(text, "MTC0\nInvalid write attempt.\nstate->SR[%i] = 0x%08X", rt, state->SR[rt]); - message(text, 2); + //sprintf(text, "MTC0\nInvalid write attempt.\nstate->SR[%i] = 0x%08X", rt, state->SR[rt]); + //message(text, 2); return; } @@ -1089,11 +1089,11 @@ static void LHV(usf_state_t * state, int vt, int element, int offset, int base) } NOINLINE static void LFV(usf_state_t * state, int vt, int element, int offset, int base) { /* Dummy implementation only: Do any games execute this? */ - char debugger[32]; + /*char debugger[32]; sprintf(debugger, "%s $v%i[0x%X], 0x%03X($%i)", "LFV", vt, element, offset & 0xFFF, base); - message(debugger, 3); + message(debugger, 3);*/ return; } static void SHV(usf_state_t * state, int vt, int element, int offset, int base) @@ -1308,7 +1308,7 @@ INLINE static void SQV(usf_state_t * state, int vt, int element, int offset, int { /* happens with "Mia Hamm Soccer 64" */ register int i; - for (i = 0; i < 16 - addr%16; i++) + for (i = 0; i < (int)(16 - addr%16); i++) state->DMEM[BES((addr + i) & 0xFFF)] = VR_B(vt, (e + i) & 0xF); return; } @@ -1455,11 +1455,11 @@ INLINE static void LTV(usf_state_t * state, int vt, int element, int offset, int } NOINLINE static void SWV(usf_state_t * state, int vt, int element, int offset, int base) { /* Dummy implementation only: Do any games execute this? */ - char debugger[32]; + /*char debugger[32]; sprintf(debugger, "%s $v%i[0x%X], 0x%03X($%i)", "SWV", vt, element, offset & 0xFFF, base); - message(debugger, 3); + message(debugger, 3);*/ return; } INLINE static void STV(usf_state_t * state, int vt, int element, int offset, int base) diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/clamp.h b/Frameworks/lazyusf/lazyusf/rsp/vu/clamp.h index 8a8b2ce2b..e2b6d79d6 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/clamp.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/clamp.h @@ -47,7 +47,7 @@ static INLINE void merge(short* VD, short* cmp, short* pass, short* fail) for (i = 0; i < N; i++) VD[i] = (cmp[i] != 0) ? 
pass[i] : fail[i]; #else - short diff[N]; + ALIGNED short diff[N]; for (i = 0; i < N; i++) diff[i] = pass[i] - fail[i]; @@ -73,8 +73,8 @@ static INLINE void vector_copy(short* VD, short* VS) static INLINE void SIGNED_CLAMP_ADD(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t sum[N]; - short hi[N], lo[N]; + ALIGNED int32_t sum[N]; + ALIGNED short hi[N], lo[N]; register int i; for (i = 0; i < N; i++) @@ -94,8 +94,8 @@ static INLINE void SIGNED_CLAMP_ADD(usf_state_t * state, short* VD, short* VS, s } static INLINE void SIGNED_CLAMP_SUB(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t dif[N]; - short hi[N], lo[N]; + ALIGNED int32_t dif[N]; + ALIGNED short hi[N], lo[N]; register int i; for (i = 0; i < N; i++) @@ -115,7 +115,7 @@ static INLINE void SIGNED_CLAMP_SUB(usf_state_t * state, short* VD, short* VS, s } static INLINE void SIGNED_CLAMP_AM(usf_state_t * state, short* VD) { /* typical sign-clamp of accumulator-mid (bits 31:16) */ - short hi[N], lo[N]; + ALIGNED short hi[N], lo[N]; register int i; for (i = 0; i < N; i++) @@ -225,8 +225,8 @@ static INLINE void SIGNED_CLAMP_AM(usf_state_t * state, short* VD) static INLINE void UNSIGNED_CLAMP(usf_state_t * state, short* VD) { /* sign-zero hybrid clamp of accumulator-mid (bits 31:16) */ - short cond[N]; - short temp[N]; + ALIGNED short cond[N]; + ALIGNED short temp[N]; register int i; SIGNED_CLAMP_AM(state, temp); /* no direct map in SSE, but closely based on this */ @@ -240,8 +240,8 @@ static INLINE void UNSIGNED_CLAMP(usf_state_t * state, short* VD) } static INLINE void SIGNED_CLAMP_AL(usf_state_t * state, short* VD) { /* sign-clamp accumulator-low (bits 15:0) */ - short cond[N]; - short temp[N]; + ALIGNED short cond[N]; + ALIGNED short temp[N]; register int i; SIGNED_CLAMP_AM(state, temp); /* no direct map in SSE, but closely based on this */ diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/shuffle.h b/Frameworks/lazyusf/lazyusf/rsp/vu/shuffle.h index 674e75fc0..3f424bd7a 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/shuffle.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/shuffle.h @@ -56,7 +56,7 @@ int sub_mask[16] = { INLINE static void SHUFFLE_VECTOR(short* VD, short* VT, const int e) { - short SV[8]; + ALIGNED short SV[8]; register int i, j; #if (0 == 0) j = sub_mask[e]; diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vabs.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vabs.h index 09b599bc4..5f36d3420 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vabs.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vabs.h @@ -21,9 +21,9 @@ */ INLINE static void do_abs(usf_state_t * state, short* VD, short* VS, short* VT) { - short neg[N], pos[N]; - short nez[N], cch[N]; /* corner case hack -- abs(-32768) == +32767 */ - short res[N]; + ALIGNED short neg[N], pos[N]; + ALIGNED short nez[N], cch[N]; /* corner case hack -- abs(-32768) == +32767 */ + ALIGNED short res[N]; register int i; vector_copy(res, VT); @@ -69,7 +69,7 @@ INLINE static void do_abs(usf_state_t * state, short* VD, short* VS, short* VT) static void VABS(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_abs(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vadd.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vadd.h index 6f9e97f43..d29fb80a0 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vadd.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vadd.h @@ -29,7 +29,7 @@ INLINE static void clr_ci(usf_state_t * state, short* VD, short* VS, short* VT) static void VADD(usf_state_t * 
state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); clr_ci(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vaddc.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vaddc.h index d055eaf11..b352040c4 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vaddc.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vaddc.h @@ -15,7 +15,7 @@ INLINE static void set_co(usf_state_t * state, short* VD, short* VS, short* VT) { /* set CARRY and carry out from sum */ - int32_t sum[N]; + ALIGNED int32_t sum[N]; register int i; for (i = 0; i < N; i++) @@ -32,7 +32,7 @@ INLINE static void set_co(usf_state_t * state, short* VD, short* VS, short* VT) static void VADDC(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); set_co(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vand.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vand.h index d0ff119ac..98c5d1139 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vand.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vand.h @@ -25,7 +25,7 @@ INLINE void do_and(usf_state_t * state, short* VD, short* VS, short* VT) static void VAND(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_and(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vch.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vch.h index acefdaaa4..fb9eb9744 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vch.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vch.h @@ -15,10 +15,10 @@ INLINE static void do_ch(usf_state_t * state, short* VD, short* VS, short* VT) { - short eq[N], ge[N], le[N]; - short sn[N]; - short VC[N]; - short diff[N]; + ALIGNED short eq[N], ge[N], le[N]; + ALIGNED short sn[N]; + ALIGNED short VC[N]; + ALIGNED short diff[N]; register int i; for (i = 0; i < N; i++) @@ -76,7 +76,7 @@ INLINE static void do_ch(usf_state_t * state, short* VD, short* VS, short* VT) static void VCH(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_ch(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vcl.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vcl.h index dfaa6298d..328bb3ff6 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vcl.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vcl.h @@ -15,11 +15,11 @@ INLINE static void do_cl(usf_state_t * state, short* VD, short* VS, short* VT) { - short eq[N], ge[N], le[N]; - short gen[N], len[N], lz[N], uz[N], sn[N]; - short diff[N]; - short cmp[N]; - unsigned short VB[N], VC[N]; + ALIGNED short eq[N], ge[N], le[N]; + ALIGNED short gen[N], len[N], lz[N], uz[N], sn[N]; + ALIGNED short diff[N]; + ALIGNED short cmp[N]; + ALIGNED unsigned short VB[N], VC[N]; register int i; for (i = 0; i < N; i++) @@ -92,7 +92,7 @@ INLINE static void do_cl(usf_state_t * state, short* VD, short* VS, short* VT) static void VCL(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_cl(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vcr.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vcr.h index 9cb6637cd..589cdbb8b 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vcr.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vcr.h @@ -15,9 +15,9 @@ INLINE static void do_cr(usf_state_t * state, short* VD, short* VS, short* VT) { - 
short ge[N], le[N], sn[N]; - short VC[N]; - short cmp[N]; + ALIGNED short ge[N], le[N], sn[N]; + ALIGNED short VC[N]; + ALIGNED short cmp[N]; register int i; for (i = 0; i < N; i++) @@ -59,7 +59,7 @@ INLINE static void do_cr(usf_state_t * state, short* VD, short* VS, short* VT) static void VCR(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_cr(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/veq.h b/Frameworks/lazyusf/lazyusf/rsp/vu/veq.h index 5afe23537..e6002f6e6 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/veq.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/veq.h @@ -39,7 +39,7 @@ INLINE static void do_eq(usf_state_t * state, short* VD, short* VS, short* VT) static void VEQ(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_eq(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vge.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vge.h index 15d0dab41..694632f02 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vge.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vge.h @@ -15,8 +15,8 @@ INLINE static void do_ge(usf_state_t * state, short* VD, short* VS, short* VT) { - short ce[N]; - short eq[N]; + ALIGNED short ce[N]; + ALIGNED short eq[N]; register int i; for (i = 0; i < N; i++) @@ -43,7 +43,7 @@ INLINE static void do_ge(usf_state_t * state, short* VD, short* VS, short* VT) static void VGE(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_ge(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vlt.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vlt.h index bd877b16b..54469474f 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vlt.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vlt.h @@ -15,8 +15,8 @@ INLINE static void do_lt(usf_state_t * state, short* VD, short* VS, short* VT) { - short cn[N]; - short eq[N]; + ALIGNED short cn[N]; + ALIGNED short eq[N]; register int i; for (i = 0; i < N; i++) @@ -43,7 +43,7 @@ INLINE static void do_lt(usf_state_t * state, short* VD, short* VS, short* VT) static void VLT(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_lt(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacf.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacf.h index 0eb34998f..3a5564afb 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacf.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacf.h @@ -15,8 +15,8 @@ INLINE static void do_macf(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t product[N]; - uint32_t addend[N]; + ALIGNED int32_t product[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -43,7 +43,7 @@ INLINE static void do_macf(usf_state_t * state, short* VD, short* VS, short* VT) static void VMACF(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_macf(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacq.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacq.h index 911249e79..9b53e69b4 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacq.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacq.h @@ -13,7 +13,7 @@ \******************************************************************************/ #include "vu.h" 
-static void VMACQ(int vd, int vs, int vt, int e) +static void VMACQ(usf_state_t * state, int vd, int vs, int vt, int e) { vd &= vs &= vt &= e &= 0; /* unused */ if (vd != vs || vt != e) diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacu.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacu.h index 4d954d5ef..0298dabbc 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmacu.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmacu.h @@ -15,8 +15,8 @@ INLINE static void do_macu(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t product[N]; - uint32_t addend[N]; + ALIGNED int32_t product[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -43,7 +43,7 @@ INLINE static void do_macu(usf_state_t * state, short* VD, short* VS, short* VT) static void VMACU(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_macu(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadh.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadh.h index a1f5bc66c..e46d1023d 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadh.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadh.h @@ -15,8 +15,8 @@ INLINE static void do_madh(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t product[N]; - uint32_t addend[N]; + ALIGNED int32_t product[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -33,7 +33,7 @@ INLINE static void do_madh(usf_state_t * state, short* VD, short* VS, short* VT) static void VMADH(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_madh(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadl.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadl.h index 41a3a8ef7..bc540da9f 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadl.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadl.h @@ -15,8 +15,8 @@ INLINE static void do_madl(usf_state_t * state, short* VD, short* VS, short* VT) { - int32_t product[N]; - uint32_t addend[N]; + ALIGNED int32_t product[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -41,7 +41,7 @@ INLINE static void do_madl(usf_state_t * state, short* VD, short* VS, short* VT) static void VMADL(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_madl(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadm.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadm.h index 034492fd6..0cf54f7b8 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadm.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadm.h @@ -15,7 +15,7 @@ INLINE static void do_madm(usf_state_t * state, short* VD, short* VS, short* VT) { - uint32_t addend[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -36,7 +36,7 @@ INLINE static void do_madm(usf_state_t * state, short* VD, short* VS, short* VT) static void VMADM(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_madm(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadn.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadn.h index 16b282962..f60c73a29 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmadn.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmadn.h @@ -15,7 +15,7 @@ INLINE static void do_madn(usf_state_t * state, short* VD, short* VS, short* VT) { - 
uint32_t addend[N]; + ALIGNED uint32_t addend[N]; register int i; for (i = 0; i < N; i++) @@ -36,7 +36,7 @@ INLINE static void do_madn(usf_state_t * state, short* VD, short* VS, short* VT) static void VMADN(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_madn(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmrg.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmrg.h index 1d2ecc76f..ba9395409 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmrg.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmrg.h @@ -22,7 +22,7 @@ INLINE static void do_mrg(usf_state_t * state, short* VD, short* VS, short* VT) static void VMRG(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mrg(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudh.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudh.h index c0d0fbe26..86fcd14d3 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudh.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudh.h @@ -29,7 +29,7 @@ INLINE static void do_mudh(usf_state_t * state, short* VD, short* VS, short* VT) static void VMUDH(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mudh(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudl.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudl.h index 9cb006792..a159abe5b 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudl.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudl.h @@ -29,7 +29,7 @@ INLINE static void do_mudl(usf_state_t * state, short* VD, short* VS, short* VT) static void VMUDL(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mudl(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudm.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudm.h index ab3b5ed0a..2af1a8caa 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudm.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudm.h @@ -29,7 +29,7 @@ INLINE static void do_mudm(usf_state_t * state, short* VD, short* VS, short* VT) static void VMUDM(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mudm(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudn.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudn.h index eb0692e9b..1a879cd4e 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmudn.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmudn.h @@ -29,7 +29,7 @@ INLINE static void do_mudn(usf_state_t * state, short* VD, short* VS, short* VT) static void VMUDN(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mudn(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vmulf.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmulf.h index b095f0315..0464e6295 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmulf.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmulf.h @@ -47,7 +47,7 @@ INLINE static void do_mulf(usf_state_t * state, short* VD, short* VS, short* VT) static void VMULF(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mulf(state, state->VR[vd], state->VR[vs], ST); diff --git 
a/Frameworks/lazyusf/lazyusf/rsp/vu/vmulu.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vmulu.h index 2726dca18..038294a33 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vmulu.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vmulu.h @@ -49,7 +49,7 @@ INLINE static void do_mulu(usf_state_t * state, short* VD, short* VS, short* VT) static void VMULU(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_mulu(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vnand.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vnand.h index af3aa012d..87daf31ef 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vnand.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vnand.h @@ -25,7 +25,7 @@ INLINE void do_nand(usf_state_t * state, short* VD, short* VS, short* VT) static void VNAND(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_nand(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vne.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vne.h index 8f7869b93..b84589fc2 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vne.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vne.h @@ -39,7 +39,7 @@ INLINE static void do_ne(usf_state_t * state, short* VD, short* VS, short* VT) static void VNE(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_ne(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vnop.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vnop.h index 7157a0b82..73d32f380 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vnop.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vnop.h @@ -13,7 +13,7 @@ \******************************************************************************/ #include "vu.h" -static void VNOP(int vd, int vs, int vt, int e) +static void VNOP(usf_state_t * state, int vd, int vs, int vt, int e) { const int WB_inhibit = vd = vs = vt = e = 1; diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vnor.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vnor.h index 9057fdf64..a47f43ce0 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vnor.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vnor.h @@ -25,7 +25,7 @@ INLINE void do_nor(usf_state_t * state, short* VD, short* VS, short* VT) static void VNOR(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_nor(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vnxor.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vnxor.h index 14030fa5b..9f2176fec 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vnxor.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vnxor.h @@ -25,7 +25,7 @@ INLINE void do_nxor(usf_state_t * state, short* VD, short* VS, short* VT) static void VNXOR(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_nxor(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vor.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vor.h index d96b4aa46..e92246d68 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vor.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vor.h @@ -25,7 +25,7 @@ INLINE void do_or(usf_state_t * state, short* VD, short* VS, short* VT) static void VOR(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_or(state, 
state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vsaw.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vsaw.h index 4e2625d12..4263d90b6 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vsaw.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vsaw.h @@ -16,7 +16,7 @@ #ifdef VU_EMULATE_SCALAR_ACCUMULATOR_READ static void VSAR(int vd, int vs, int vt, int e) { - short oldval[N]; + ALIGNED short oldval[N]; register int i; for (i = 0; i < N; i++) diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vsub.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vsub.h index a13fcb70e..f3c1b511a 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vsub.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vsub.h @@ -29,7 +29,7 @@ INLINE static void clr_bi(usf_state_t * state, short* VD, short* VS, short* VT) static void VSUB(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); clr_bi(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vsubc.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vsubc.h index cb520ffca..b7b055170 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vsubc.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vsubc.h @@ -15,7 +15,7 @@ INLINE static void set_bo(usf_state_t * state, short* VD, short* VS, short* VT) { /* set CARRY and borrow out from difference */ - int32_t dif[N]; + ALIGNED int32_t dif[N]; register int i; for (i = 0; i < N; i++) @@ -32,7 +32,7 @@ INLINE static void set_bo(usf_state_t * state, short* VD, short* VS, short* VT) static void VSUBC(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); set_bo(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/rsp/vu/vxor.h b/Frameworks/lazyusf/lazyusf/rsp/vu/vxor.h index 7bbb2a54b..04454e688 100644 --- a/Frameworks/lazyusf/lazyusf/rsp/vu/vxor.h +++ b/Frameworks/lazyusf/lazyusf/rsp/vu/vxor.h @@ -25,7 +25,7 @@ INLINE void do_xor(usf_state_t * state, short* VD, short* VS, short* VT) static void VXOR(usf_state_t * state, int vd, int vs, int vt, int e) { - short ST[N]; + ALIGNED short ST[N]; SHUFFLE_VECTOR(ST, state->VR[vt], e); do_xor(state, state->VR[vd], state->VR[vs], ST); diff --git a/Frameworks/lazyusf/lazyusf/tlb.c b/Frameworks/lazyusf/lazyusf/tlb.c index cd75ff6ed..a3f8f2aec 100644 --- a/Frameworks/lazyusf/lazyusf/tlb.c +++ b/Frameworks/lazyusf/lazyusf/tlb.c @@ -73,7 +73,7 @@ test=(BYTE *) VirtualAlloc( 0x10, 0x70000, MEM_RESERVE, PAGE_EXECUTE_READWRITE); */ void SetupTLB_Entry (usf_state_t * state, int Entry) { - uint32_t FastIndx; + int32_t FastIndx; if (!state->tlb[Entry].EntryDefined) { return; } @@ -157,7 +157,7 @@ uint32_t TranslateVaddr ( usf_state_t * state, uintptr_t * Addr) { } void WriteTLBEntry (usf_state_t * state, int32_t index) { - uint32_t FastIndx; + int32_t FastIndx; FastIndx = index << 1; if ((state->PROGRAM_COUNTER >= state->FastTlb[FastIndx].VSTART && diff --git a/Frameworks/lazyusf/lazyusf/usf.c b/Frameworks/lazyusf/lazyusf/usf.c index 76fc3ff3d..cce9d0563 100644 --- a/Frameworks/lazyusf/lazyusf/usf.c +++ b/Frameworks/lazyusf/lazyusf/usf.c @@ -13,14 +13,14 @@ #include "usf_internal.h" -ssize_t get_usf_state_size() +size_t get_usf_state_size() { return sizeof(usf_state_t) + 8192; } void usf_clear(void * state) { - ssize_t offset; + size_t offset; memset(state, 0, get_usf_state_size()); offset = 4096 - (((uintptr_t)state) & 4095); USF_STATE_HELPER->offset_to_structure = offset; @@ -85,7 +85,7 @@ static uint32_t get_le32( const void * _p 
) return p[0] + p[1] * 0x100 + p[2] * 0x10000 + p[3] * 0x1000000; } -int usf_upload_section(void * state, const uint8_t * data, ssize_t size) +int usf_upload_section(void * state, const uint8_t * data, size_t size) { uint32_t temp; @@ -103,8 +103,8 @@ int usf_upload_section(void * state, const uint8_t * data, ssize_t size) start = get_le32( data ); data += 4; size -= 4; while(len) { - int page = start >> 16; - int readLen = ( ((start + len) >> 16) > page) ? (((page + 1) << 16) - start) : len; + uint32_t page = start >> 16; + uint32_t readLen = ( ((start + len) >> 16) > page) ? (((page + 1) << 16) - start) : len; if( USF_STATE->ROMPages[page] == 0 ) { USF_STATE->ROMPages[page] = malloc(0x10000); @@ -172,14 +172,14 @@ static void usf_startup(void * state) StartEmulationFromSave(USF_STATE, USF_STATE->savestatespace); } -void usf_render(void * state, int16_t * buffer, ssize_t count, int32_t * sample_rate) +void usf_render(void * state, int16_t * buffer, size_t count, int32_t * sample_rate) { if ( !USF_STATE->MemoryState ) usf_startup( USF_STATE ); if ( USF_STATE->samples_in_buffer ) { - ssize_t do_max = USF_STATE->samples_in_buffer; + size_t do_max = USF_STATE->samples_in_buffer; if ( do_max > count ) do_max = count; diff --git a/Frameworks/lazyusf/lazyusf/usf.h b/Frameworks/lazyusf/lazyusf/usf.h index 77f14ce0c..e4b32fb62 100644 --- a/Frameworks/lazyusf/lazyusf/usf.h +++ b/Frameworks/lazyusf/lazyusf/usf.h @@ -21,16 +21,16 @@ typedef struct usf_state_helper usf_state_helper_t; extern "C" { #endif -ssize_t get_usf_state_size(); +size_t get_usf_state_size(); void usf_clear(void * state); void usf_set_compare(void * state, int enable); void usf_set_fifo_full(void * state, int enable); -int usf_upload_section(void * state, const uint8_t * data, ssize_t size); +int usf_upload_section(void * state, const uint8_t * data, size_t size); -void usf_render(void * state, int16_t * buffer, ssize_t count, int32_t * sample_rate); +void usf_render(void * state, int16_t * buffer, size_t count, int32_t * sample_rate); void usf_shutdown(void * state); diff --git a/Frameworks/lazyusf/lazyusf/usf_internal.h b/Frameworks/lazyusf/lazyusf/usf_internal.h index 38784785a..8ddb8fd75 100644 --- a/Frameworks/lazyusf/lazyusf/usf_internal.h +++ b/Frameworks/lazyusf/lazyusf/usf_internal.h @@ -3,7 +3,7 @@ struct usf_state_helper { - ssize_t offset_to_structure; + size_t offset_to_structure; }; typedef uint32_t RCPREG; @@ -46,13 +46,13 @@ struct usf_state uint32_t enablecompare, enableFIFOfull; // buffering for rendered sample data - ssize_t sample_buffer_count; + size_t sample_buffer_count; int16_t * sample_buffer; // audio.c int32_t SampleRate; int16_t samplebuf[16384]; - ssize_t samples_in_buffer; + size_t samples_in_buffer; // cpu.c uint32_t NextInstruction, JumpToLocation, AudioIntrReg;
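
A note on the memory.c change: Allocate_Memory now reserves one contiguous region with large_alloc (VirtualAlloc on Windows, an anonymous mmap elsewhere) and derives TLB_Map, N64MEM and NOMEM from offsets into it, instead of issuing three separate MAP_FIXED mmap calls. The request size, 0x100000 * sizeof(uintptr_t) + 0x1D000 + RdramSize, is exactly the TLB map (0x100000 pointers), a 0x10000 gap, the RDRAM image, and the 0xD000 NOMEM tail, and Release_Memory returns the whole block with a single large_free; the NOMEM tail is now ordinary read/write memory rather than a PROT_NONE trap page. One caveat on the POSIX path: mmap reports failure as MAP_FAILED rather than NULL, while Allocate_Memory tests the result against NULL, so a wrapper along the lines of the sketch below (an illustration, not part of the patch) keeps that NULL check meaningful.

#include <stddef.h>
#include <sys/mman.h>

#ifdef __APPLE__
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Illustration only: same shape as the patch's POSIX large_alloc, but mmap's
   MAP_FAILED sentinel is normalized to NULL so callers can test "== NULL". */
static void * large_alloc_checked(size_t size)
{
    void * p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return (p == MAP_FAILED) ? NULL : p;
}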
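
The recurring edit across the rsp/vu headers replaces plain scratch arrays (short ST[N] and the other per-lane temporaries) with ALIGNED ones, so that SHUFFLE_VECTOR and the clamp/accumulator helpers operate on storage of known alignment. The ALIGNED macro itself is defined in the RSP headers outside this patch; a plausible definition, assuming 16-byte (SSE vector width) alignment on MSVC and GCC/Clang, is sketched below.

/* Hypothetical sketch only: the real definition lives in the RSP sources,
   not in this patch, and 16-byte alignment is an assumption. */
#if defined(_MSC_VER)
#define ALIGNED __declspec(align(16))
#elif defined(__GNUC__) || defined(__clang__)
#define ALIGNED __attribute__((aligned(16)))
#else
#define ALIGNED
#endif

/* Usage then matches the declarations introduced above, e.g. ALIGNED short ST[N]; */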
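
Finally, the public interface in usf.h moves from ssize_t to size_t for get_usf_state_size, usf_upload_section and usf_render, and the matching counters in usf_internal.h follow. A minimal caller sketch is shown below; that usf_upload_section returns 0 on success, that count is measured in sample frames, and the render_example wrapper itself are all assumptions for illustration, not spelled out in this patch.

#include <stdint.h>
#include <stdlib.h>
#include "usf.h"

/* Sketch of a caller; `section`/`section_size` stand for one decompressed
   USF program section and are hypothetical names, not part of the API. */
static int render_example(const uint8_t * section, size_t section_size)
{
    int32_t sample_rate = 0;
    int16_t frames[4096 * 2];                  /* assumed interleaved stereo */
    void * state = malloc(get_usf_state_size());
    if (state == NULL)
        return -1;

    usf_clear(state);                          /* zeroes and aligns the state block */

    if (usf_upload_section(state, section, section_size) != 0) {  /* assumed: 0 == success */
        usf_shutdown(state);
        free(state);
        return -1;
    }

    usf_render(state, frames, 4096, &sample_rate);   /* sample_rate assumed to be filled in */

    usf_shutdown(state);
    free(state);
    return 0;
}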