interpolate scrolling textures (#30)

* interpolate scrolling textures

* cleaner way to skip interpolation

* fixed interpolation only lasting for one frame

* just reorder these, just because
Isaac0-dev 2024-05-14 09:37:29 +10:00 committed by GitHub
parent a6c1b2a3d1
commit 082b6dd6da
4 changed files with 123 additions and 19 deletions
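
The change follows the same pattern as the engine's other interpolation hooks: the game tick advances per-target scroll accumulators, and each rendered frame blends between the previous and current values. A minimal sketch of that blend (the helper below is illustrative only, not part of the diff; delta is assumed to be the fraction of the current game tick elapsed at render time):

// Illustrative only: the per-frame blend that patch_scroll_targets_interpolated()
// applies to each tracked vertex value. "prev" is the value snapshotted when the
// game tick began, "cur" is the value after the scroll behavior ran, and "delta"
// (0..1) is assumed to be the fraction of the tick elapsed at render time.
static float blend_scroll(float prev, float cur, float delta) {
    return prev * (1.0f - delta) + cur * delta;
}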

View file

@@ -11,7 +11,8 @@
#include "pc/pc_main.h"
#include "pc/utils/misc.h"
static inline void shift_UV_JUMP(Vtx* *verts, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
static inline void shift_UV_JUMP(struct ScrollTarget *scroll, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
Vtx* *verts = scroll->vertices;
u16 i;
if (verts[0]->n.flag++ <= cycle) {
@@ -22,16 +23,17 @@ static inline void shift_UV_JUMP(Vtx* *verts, u16 vertcount, s16 speed, u16 bhv,
if (bhv < SCROLL_UV_X) {
for (i = 0; i < vertcount; i++) {
verts[i]->n.ob[MIN(bhv, 2)] += speed;
scroll->interpF32[i] += speed;
}
} else {
for (i = 0; i < vertcount; i++) {
verts[i]->n.tc[MIN(bhv-SCROLL_UV_X, 1)] += speed;
scroll->interpS16[i] += speed;
}
}
}
static inline void shift_UV_NORMAL(Vtx* *verts, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
static inline void shift_UV_NORMAL(struct ScrollTarget *scroll, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
Vtx* *verts = scroll->vertices;
u16 overflownum = 0x1000;
u16 correction = 0;
u16 i;
@@ -44,9 +46,9 @@ static inline void shift_UV_NORMAL(Vtx* *verts, u16 vertcount, s16 speed, u16 bh
for (i = 0; i < vertcount; i++) {
if (correction == 0) {
verts[i]->n.ob[MIN(bhv, 2)] += speed;
scroll->interpF32[i] += speed;
} else {
verts[i]->n.ob[MIN(bhv, 2)] -= correction;
scroll->interpF32[i] -= correction;
}
}
} else {
@@ -57,28 +59,42 @@ static inline void shift_UV_NORMAL(Vtx* *verts, u16 vertcount, s16 speed, u16 bh
for (i = 0; i < vertcount; i++) {
if (correction == 0) {
verts[i]->n.tc[MIN(bhv-SCROLL_UV_X, 1)] += speed;
scroll->interpS16[i] += speed;
} else {
verts[i]->n.tc[MIN(bhv-SCROLL_UV_X, 1)] -= correction;
scroll->interpS16[i] -= correction;
}
}
}
if (correction == 0) {
verts[0]->n.flag++;
} else {
if (bhv < SCROLL_UV_X) {
u8 bhvIndex = MIN(bhv, 2);
for (i = 0; i < vertcount; i++) {
verts[i]->n.ob[bhvIndex] = scroll->interpF32[i];
}
} else {
u8 bhvIndex = MIN(bhv-SCROLL_UV_X, 1);
for (i = 0; i < vertcount; i++) {
verts[i]->n.tc[bhvIndex] = scroll->interpS16[i];
}
}
scroll->needInterp = false;
}
}
static inline void shift_UV_SINE(Vtx* *verts, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
static inline void shift_UV_SINE(struct ScrollTarget *scroll, u16 vertcount, s16 speed, u16 bhv, u16 cycle) {
Vtx* *verts = scroll->vertices;
u32 i;
if (bhv < SCROLL_UV_X) {
for (i = 0; i < vertcount; i++) {
verts[i]->n.ob[MIN(bhv, 2)] += sins(verts[0]->n.flag) * speed;
scroll->interpF32[i] += sins(verts[0]->n.flag) * speed;
}
} else {
for (i = 0; i < vertcount; i++) {
verts[i]->n.tc[MIN(bhv-SCROLL_UV_X, 1)] += (u16) (sins(verts[0]->n.flag) * speed);
scroll->interpS16[i] += (u16) (sins(verts[0]->n.flag) * speed);
}
}
verts[0]->n.flag += cycle * 0x23;
@@ -104,22 +120,60 @@ void uv_update_scroll(void) {
u16 offset = (u16) round(o->oFaceAnglePitch * 180.0 / 0x8000);
u32 vtxIndex = (u32) o->oBehParams;
struct ScrollTarget *scroll = get_scroll_targets(vtxIndex, vertCount, offset);
if (!scroll || !scroll->vertices) { return; }
// Check for invalid scrolling behavior
if (bhv == 3 || bhv > SCROLL_UV_Y) { return; }
struct ScrollTarget *scroll = get_scroll_targets(vtxIndex, vertCount, offset);
if (!scroll || !scroll->vertices) { return; }
Vtx* *verts = scroll->vertices;
// Init interpolation
if (!scroll->hasInterpInit) {
scroll->hasInterpInit = true;
scroll->bhv = bhv;
if (bhv < SCROLL_UV_X) {
scroll->interpF32 = calloc(scroll->size, sizeof(f32));
scroll->prevF32 = calloc(scroll->size, sizeof(f32));
u8 bhvIndex = MIN(bhv, 2);
for (u16 k = 0; k < scroll->size; k++) {
scroll->interpF32[k] = verts[k]->n.ob[bhvIndex];
}
} else {
scroll->interpS16 = calloc(scroll->size, sizeof(s16));
scroll->prevS16 = calloc(scroll->size, sizeof(s16));
u8 bhvIndex = MIN(bhv-SCROLL_UV_X, 1);
for (u16 k = 0; k < scroll->size; k++) {
scroll->interpS16[k] = verts[k]->n.tc[bhvIndex];
}
}
}
// Prepare for interpolation
if (bhv < SCROLL_UV_X) {
u8 bhvIndex = MIN(bhv, 2);
for (u16 k = 0; k < scroll->size; k++) {
scroll->prevF32[k] = verts[k]->n.ob[bhvIndex];
}
} else {
u8 bhvIndex = MIN(bhv-SCROLL_UV_X, 1);
for (u16 k = 0; k < scroll->size; k++) {
scroll->prevS16[k] = verts[k]->n.tc[bhvIndex];
}
}
scroll->needInterp = true;
switch (scrollType) {
case MODE_SCROLL_UV:
shift_UV_NORMAL(verts, vertCount, speed, bhv, cycle);
shift_UV_NORMAL(scroll, vertCount, speed, bhv, cycle);
break;
case MODE_SCROLL_SINE:
shift_UV_SINE(verts, vertCount, speed, bhv, cycle);
shift_UV_SINE(scroll, vertCount, speed, bhv, cycle);
break;
case MODE_SCROLL_JUMP:
shift_UV_JUMP(verts, vertCount, speed, bhv, cycle);
shift_UV_JUMP(scroll, vertCount, speed, bhv, cycle);
break;
}
}
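
Because patch_scroll_targets_interpolated() overwrites the Vtx coordinates with blended values every rendered frame, the logical scroll position now lives in scroll->interpF32 / scroll->interpS16 rather than in the Vtx data itself; uv_update_scroll() snapshots the last drawn values into prevF32 / prevS16 before the shift_UV_* helpers advance the accumulators. A self-contained model of that timeline for a single value (the tick/render split, the speed, and the delta values are assumptions for illustration, not taken from the diff):

#include <stdio.h>

// Mirrors prevF32[k], interpF32[k], and the Vtx value for one scrolled axis.
static float prevVal, interpVal, drawnVal;

static void game_tick(float speed) {
    prevVal = drawnVal;      // snapshot what was last drawn (uv_update_scroll)
    interpVal += speed;      // advance the logical position (shift_UV_NORMAL)
}

static void render_frame(float delta) {
    // what patch_scroll_targets_interpolated() does once per rendered frame
    drawnVal = prevVal * (1.0f - delta) + interpVal * delta;
}

int main(void) {
    for (int tick = 1; tick <= 3; tick++) {
        game_tick(0.5f);
        render_frame(0.5f);   // mid-tick render
        render_frame(1.0f);   // end-of-tick render
        printf("tick %d -> drawn %.2f\n", tick, drawnVal);
    }
    return 0;
}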

View file

@@ -60,7 +60,7 @@ struct ScrollTarget* find_or_create_scroll_targets(u32 id, bool hasOffset) {
}
if (scroll == NULL) {
scroll = malloc(sizeof(struct ScrollTarget));
scroll = calloc(1, sizeof(struct ScrollTarget));
scroll->id = id;
scroll->size = 0;
scroll->vertices = NULL;
@@ -91,7 +91,7 @@ void add_vtx_scroll_target(u32 id, Vtx *vtx, u32 size, bool hasOffset) {
Vtx* *newArray = realloc(scroll->vertices, newSize);
if (!newArray) {
newArray = malloc(newSize);
newArray = calloc(1, newSize);
memcpy(newArray, scroll->vertices, oldSize);
free(scroll->vertices);
}
@@ -116,6 +116,10 @@ void free_vtx_scroll_targets(void) {
while (scroll) {
nextScroll = scroll->next;
free(scroll->interpF32);
free(scroll->prevF32);
free(scroll->interpS16);
free(scroll->prevS16);
free(scroll->vertices);
free(scroll);
scroll = nextScroll;
@@ -123,3 +127,36 @@ void free_vtx_scroll_targets(void) {
sScrollTargets = NULL;
}
void patch_scroll_targets_before(void) {
struct ScrollTarget *scroll = sScrollTargets;
while (scroll) {
scroll->needInterp = false;
scroll = scroll->next;
}
}
void patch_scroll_targets_interpolated(f32 delta) {
f32 antiDelta = 1.0f - delta;
struct ScrollTarget *scroll = sScrollTargets;
while (scroll) {
if (scroll->needInterp) {
Vtx* *verts = scroll->vertices;
if (scroll->bhv < SCROLL_UV_X) {
u8 bhvIndex = MIN(scroll->bhv, 2);
for (u16 k = 0; k < scroll->size; k++) {
verts[k]->n.ob[bhvIndex] = scroll->prevF32[k] * antiDelta + scroll->interpF32[k] * delta;
}
} else {
u8 bhvIndex = MIN(scroll->bhv-SCROLL_UV_X, 1);
for (u16 k = 0; k < scroll->size; k++) {
verts[k]->n.tc[bhvIndex] = (int) scroll->prevS16[k] * antiDelta + scroll->interpS16[k] * delta;
}
}
}
scroll = scroll->next;
}
}

View file

@@ -31,7 +31,16 @@ struct ScrollTarget {
u32 id;
u32 size;
Vtx* *vertices;
bool hasOffset;
bool hasInterpInit;
bool needInterp;
f32 *interpF32;
f32 *prevF32;
s16 *interpS16;
s16 *prevS16;
u16 bhv;
struct ScrollTarget *next;
};

View file

@@ -126,6 +126,7 @@ extern void patch_bubble_particles_before(void);
extern void patch_snow_particles_before(void);
extern void patch_djui_before(void);
extern void patch_djui_hud_before(void);
extern void patch_scroll_targets_before(void);
extern void patch_mtx_interpolated(f32 delta);
extern void patch_screen_transition_interpolated(f32 delta);
@@ -137,6 +138,7 @@ extern void patch_bubble_particles_interpolated(f32 delta);
extern void patch_snow_particles_interpolated(f32 delta);
extern void patch_djui_interpolated(f32 delta);
extern void patch_djui_hud(f32 delta);
extern void patch_scroll_targets_interpolated(f32 delta);
static void patch_interpolations_before(void) {
patch_mtx_before();
@@ -149,6 +151,7 @@ static void patch_interpolations_before(void) {
patch_snow_particles_before();
patch_djui_before();
patch_djui_hud_before();
patch_scroll_targets_before();
}
static inline void patch_interpolations(f32 delta) {
@@ -162,6 +165,7 @@ static inline void patch_interpolations(f32 delta) {
patch_snow_particles_interpolated(delta);
patch_djui_interpolated(delta);
patch_djui_hud(delta);
patch_scroll_targets_interpolated(delta);
}
void produce_interpolation_frames_and_delay(void) {