#include <stdio.h>
#include <string.h>
#include <r_io.h>
#include <r_util.h>
#include <gb.h>
#include <ragb_sdl.h>

RIOPlugin r_io_plugin_gb_ppu;

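//r_io plugin that exposes the PPU registers as a small in-memory file.
//__lseek clamps the cursor to GB_PPU_N_REGS; only the low 16 bits of
//ppu->seek hold the cursor, the upper bits are preserved untouched.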
static ut64 __lseek(RIO* io, RIODesc *desc, ut64 offset, int whence) {
	GBPPU *ppu = desc->data;
	ut64 seek = ppu->seek & 0xffff;
	switch (whence) {
	case R_IO_SEEK_SET:
		seek = R_MIN (GB_PPU_N_REGS, offset);
		break;
	case R_IO_SEEK_CUR:
		seek = R_MIN (GB_PPU_N_REGS, seek + offset);
		break;
	case R_IO_SEEK_END:
		seek = GB_PPU_N_REGS;
		break;
	}
	ppu->seek = (ppu->seek & (~0xffff)) | seek;
	return seek;
}

static bool __check(RIO *io, const char *pathname, bool many) {
	return r_str_startswith (pathname, "gb_ppu://");
}

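//read from the register buffer at the current cursor, clamped to GB_PPU_N_REGS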
static int __read(RIO *io, RIODesc *desc, ut8 *buf, int len) {
	GBPPU *ppu = desc->data;
	ut64 seek = ppu->seek & 0xffff;
	if (seek >= GB_PPU_N_REGS || len < 1) {
		return 0;
	}
	len = R_MIN (len, GB_PPU_N_REGS - seek);
	memcpy (buf, &ppu->buf[seek], len);
	seek += len;
	ppu->seek = (ppu->seek & (~0xffff)) | seek;
	return len;
}

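//write to the register buffer with per-register side effects:
//the lower 3 bits of STAT and all of LY are read-only, and toggling
//the LCDC enable bit is flagged so gb_ppu_continue can react to it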
static int __write(RIO *io, RIODesc *desc, const ut8 *buf, int len) {
	GBPPU *ppu = desc->data;
	ut64 seek = ppu->seek & 0xffff;
	if (seek >= GB_PPU_N_REGS || len < 1) {
		return 0;
	}
	len = R_MIN (len, GB_PPU_N_REGS - seek);
	ut32 i;
	for (i = 0; i < len; i++) {
		switch (seek) {
		case GB_PPU_STAT:
			//lower 3 bits of STAT are read-only
			ppu->buf[GB_PPU_STAT] = (buf[i] & 0xf8) |
				(ppu->buf[GB_PPU_STAT] & 0x7);
			//fall through
		case GB_PPU_LY:
			//LY is read-only
			break;
		case GB_PPU_LCDC:
			if ((ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_ENABLE) !=
				(buf[i] & GB_PPU_LCDC_ENABLE)) {
				ppu->fifo.flags1 |= GB_PIXEL_FIFO_LCDC_SWITCH;
			}
			//fall through
		default:
			ppu->buf[seek] = buf[i];
			break;
		}
		seek++;
	}
	return len;
}

static bool __close(RIODesc *desc) {
	return true;
}

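//the uri carries the address of an already allocated GBPPU
//(see gb_ppu_open), so the plugin can attach to its state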
static RIODesc *__open(RIO *io, const char *pathname, int rw, int mode) {
	if (!r_str_startswith (pathname, "gb_ppu://")) {
		return NULL;
	}
	GBPPU *ppu = NULL;
	sscanf (pathname, "gb_ppu://%p", &ppu);
	RIODesc *desc = r_io_desc_new (io, &r_io_plugin_gb_ppu, pathname,
		R_PERM_RWX, mode, ppu);
	return desc;
}

RIOPlugin r_io_plugin_gb_ppu = {
	.meta = {
		.name = "gb_ppu",
		.desc = "IO plugin exposing the GameBoy PPU registers",
		.license = "LGPL",
	},
	.uris = "gb_ppu://",
	.open = __open,
	.close = __close,
	.read = __read,
	.check = __check,
	.seek = __lseek,
	.write = __write,
};

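//set up the PPU: vram is backed by a 0x2000 byte malloc fd mapped at 0x8000,
//and the registers are exposed through this plugin with maps at 0xff40 and 0xff47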
GBPPU *gb_ppu_open (RIO *io, SDL_Renderer *renderer) {
	GBPPU *ppu = R_NEW0 (GBPPU);
	if (!ppu) {
		return NULL;
	}
	ppu->vram_fd = r_io_fd_open (io, "malloc://0x2000", R_PERM_RWX, 0);
	if (ppu->vram_fd < 0) {
		free (ppu);
		return NULL;
	}
	RIOMap *vram = r_io_map_add (io, ppu->vram_fd, R_PERM_RWX, 0ULL, 0x8000, 0x2000);
	if (!vram) {
		r_io_fd_close (io, ppu->vram_fd);
		free (ppu);
		return NULL;
	}
	ppu->vram_mapid = vram->id;
	char uri[64];
	sprintf (uri, "gb_ppu://%p", ppu);
	RIODesc *desc = r_io_desc_open_plugin (io, &r_io_plugin_gb_ppu, uri, R_PERM_RWX, 0);
	if (!desc || !r_io_map_add (io, desc->fd, R_PERM_RWX, 0ULL, 0xff40, 0x6) ||
		!r_io_map_add (io, desc->fd, R_PERM_RWX, 0x6, 0xff47, 0x5)) {
		if (desc) {
			//avoid leaking the descriptor if mapping failed
			r_io_desc_close (desc);
		}
		r_io_fd_close (io, ppu->vram_fd);
		free (ppu);
		return NULL;
	}
	ppu->reg_fd = desc->fd;
	//TODO: use proper clear color
	ppu->pixbuf = gb_pix_buf_new (renderer, 160, 144, 0xabcdef);
	if (!ppu->pixbuf) {
		r_io_desc_close (desc);
		r_io_fd_close (io, ppu->vram_fd);
		free (ppu);
		return NULL;
	}
	return ppu;
}

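//mode 2 (oam scan): walk the 40 oam entries (4 bytes each, 2 cycles per entry)
//and collect up to 10 objects overlapping the current line;
//returns the cycles left over for the next mode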
static ut32 gb_ppu_oam_scan_continue (GB *gb, ut32 cycles) {
	if (gb->ppu->ost.addr >= 0xa0) {
		gb->ppu->ost.addr = 0;
		gb->ppu->ost.n_entries = 0;
	}
	if (cycles & 0x1) {
		R_LOG_WARN ("Odd amount of cycles");
	}
	if (gb->dma.seek & GB_DMA_ACTIVE) {
		//while dma is running the scan advances, but no objects are collected
		const ut8 running_cycles = R_MIN (cycles, (0xa0 - gb->ppu->ost.addr) >> 1);
		//every oam entry costs 2 cycles
		gb->ppu->ost.addr += running_cycles << 1;
		cycles -= running_cycles;
		goto beach;
	}
	const ut8 height = (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_BIG_OBJ)? 16: 8;
	const ut8 ly = gb->ppu->buf[GB_PPU_LY] + 16;
	while (cycles && gb->ppu->ost.addr < 0xa0) {
		if (gb->ppu->ost.n_entries < 10) {
			ut8 yx[2];
			//this is probably inaccurate
			r_io_fd_read_at (gb->io, gb->dma.oam_fd, (ut64)gb->ppu->ost.addr, yx, 2);
			if ((yx[0] <= ly) && (ly < (yx[0] + height))) {
				gb->ppu->ost.data[gb->ppu->ost.n_entries] =
					(gb->ppu->ost.addr << 16) | (yx[0] << 8) | yx[1];
				gb->ppu->ost.n_entries++;
			}
		}
		gb->ppu->ost.addr += 4;
		cycles -= 2;
	}
beach:
	if (gb->ppu->ost.addr == 0xa0) {
		//indicate next mode
		gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_RENDER;
		RIOMap *vram = r_io_map_get (gb->io, gb->ppu->vram_mapid);
		vram->perm = 0;	//disable vram access for rendering
	}
	return cycles;
}

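//fetch the tile index for the current fetcher position
//from the bg or window tile map in vram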
static void read_tile_data (GBPPU *ppu, RIO *io, ut8 *tile) {
	const bool use_window = !!(ppu->fifo.flags & GB_PIXEL_FIFO_FETCH_WINDOW);
	ut64 addr;
	if (use_window) {
		const ut8 x = ((ppu->fifo.x + ((!!ppu->fifo.n_fpixel) << 3)) -
			ppu->buf[GB_PPU_WX]) & 0xf8;
		//maybe store this at begin of line
#if 0
		const ut8 y = (ppu->buf[GB_PPU_LY] - ppu->buf[GB_PPU_WY]) & 0xf8;
#else
		const ut8 y = (ppu->buf[GB_PPU_LY] - ppu->fifo.wy) & 0xf8;
#endif
		//tile map is 32x32 entries of one byte; x and y are masked to tile boundaries
		addr = ((ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_WIN_TILE_MAP)?
			0x1800: 0x1c00) + y * 4 + (x >> 3);
	} else {
		const ut8 x = (ppu->fifo.x + ppu->buf[GB_PPU_SCX] +
			((!!ppu->fifo.n_fpixel) << 3)) & 0xf8;
		const ut8 y = (ppu->buf[GB_PPU_LY] + ppu->buf[GB_PPU_SCY]) & 0xf8;
		addr = ((ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_BG_TILE_MAP)?
			0x1800: 0x1c00) + y * 4 + (x >> 3);
	}
	r_io_fd_read_at (io, ppu->vram_fd, addr, tile, 1);
}

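//fetch the tile index of the object currently being fetched from oam;
//8x16 objects ignore the lowest bit of the index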
static void read_obj_data (GBPPU *ppu, RIO *io, ut8 *tile, int oam_fd) {
	ut8 addr = (ppu->fifo.obj >> 16) & 0xff;
	r_io_fd_read_at (io, oam_fd, (ut64)(addr + 2), tile, 1);
	if (ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_BIG_OBJ) {
		tile[0] &= 0xfe;
	}
}

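//fifo pixels are nibbles: bits 0-1 color index, bit 2 obj palette
//(or window marker for bg pixels), bit 3 set for obj pixels;
//merge the fetched object pixels into the upper 8 fifo pixels,
//honoring obj transparency and bg-over-obj priority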
static void gb_ppu_pixel_fifo_merge_opixels (PixelFifo *pxf, bool priority) {
	ut32 pixels = pxf->data >> 32;
	ut64 newpixels = 0;
	ut32 i;
	for (i = 0; i < 8; i++) {
		const ut8 bg_pixel = (pixels & (0xf << ((7 - i) << 2))) >> ((7 - i) << 2);
		const ut8 fg_pixel = (pxf->fetcher[1].fetched & (0xf << ((7 - i) << 2))) >> ((7 - i) << 2);
		newpixels <<= 4;
		//background always wins if obj is transparent or bg is obj
		if ((!(fg_pixel & 0x3)) || (bg_pixel & 0x8)) {
			newpixels |= bg_pixel;
			continue;
		}
#if 0
		if ((bg_pixel & 0xc) == 0x4) {
			//bg is window
			//TODO: check for window priority
		}
#endif
		if (!priority && (bg_pixel & 0x3)) {
			newpixels |= bg_pixel;
			continue;
		}
		newpixels |= fg_pixel;
	}
	pxf->data = (pxf->data & 0xffffffff) | (newpixels << 32);
}

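//one dot of the fetcher state machine (8 states: tile number, tile data low,
//tile data high, push); fetcher[0] handles bg/window, fetcher[1] handles objects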
static void gb_pixel_fifo_fetch_continue (GB *gb) {
	PixelFifo *fifo = &gb->ppu->fifo;
	ut8 fetch_obj = !!(fifo->flags & GB_PIXEL_FIFO_FETCH_OBJECT);
	ut8 tile;
	switch (fifo->fetcher[fetch_obj].state_ctr) {
	case 0:
		if (!fetch_obj) {
			read_tile_data (gb->ppu, gb->io, &tile);
			if (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_TILE_BASE) {
				fifo->fetcher[0].addr = tile * 16;
			} else {
				st8 s_tile = (st8)tile;
				fifo->fetcher[0].addr = 0x1000 + s_tile * 16;
			}
			if (fifo->flags & GB_PIXEL_FIFO_FETCH_WINDOW) {
				fifo->fetcher[0].addr +=
					((gb->ppu->buf[GB_PPU_LY] - fifo->wy) & 0x7) * 2;
			} else {
				fifo->fetcher[0].addr += (gb->ppu->buf[GB_PPU_LY] & 0x7) * 2;
			}
		} else {
			read_obj_data (gb->ppu, gb->io, &tile, gb->dma.oam_fd);
			fifo->fetcher[1].addr = tile;
		}
		//fall through
	case 1:
		break;
	case 2:
		if (fetch_obj) {
			ut8 addr = (fifo->obj >> 16) & 0xff;
			//read object attributes
			r_io_fd_read_at (gb->io, gb->dma.oam_fd, (ut64)(addr + 3), &tile, 1);
			fifo->obj = (fifo->obj & 0xffffff) | (tile << 24);
			//tile idx
			tile = fifo->fetcher[1].addr & 0xff;
			//object_y - line_y
			addr = ((fifo->obj >> 8) & 0xff) - gb->ppu->buf[GB_PPU_LY];
			fifo->fetcher[1].addr = tile * 16;
			if (fifo->obj & GB_FIFO_OAM_FLIP_Y) {
				//perform y flip
				addr ^= (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_BIG_OBJ)? 0xf: 0x7;
			}
			fifo->fetcher[1].addr += addr * 2;
		} else {
			r_io_fd_read_at (gb->io, gb->ppu->vram_fd, fifo->fetcher[0].addr,
				&fifo->fetcher[0].data[0], 1);
			fifo->fetcher[0].addr++;
		}
		//fall through
	case 3:
		break;
	case 4:
		r_io_fd_read_at (gb->io, gb->ppu->vram_fd, fifo->fetcher[fetch_obj].addr,
			&fifo->fetcher[fetch_obj].data[!fetch_obj], 1);
		fifo->fetcher[fetch_obj].addr++;
		if (!fetch_obj) {
			//interleave the two bitplanes into 8 nibble-sized pixels
			ut8 *p = &fifo->fetcher[0].data[0];
			fifo->fetcher[0].fetched =
				((p[1] & 0x80) << 22) | ((p[0] & 0x80) << 21) |
				((p[1] & 0x40) << 19) | ((p[0] & 0x40) << 18) |
				((p[1] & 0x20) << 16) | ((p[0] & 0x20) << 15) |
				((p[1] & 0x10) << 13) | ((p[0] & 0x10) << 12) |
				((p[1] & 0x8) << 10) | ((p[0] & 0x8) << 9) |
				((p[1] & 0x4) << 7) | ((p[0] & 0x4) << 6) |
				((p[1] & 0x2) << 4) | ((p[0] & 0x2) << 3) |
				((p[1] & 0x1) << 1) | (p[0] & 0x1);
			if (fifo->flags & GB_PIXEL_FIFO_FETCH_WINDOW) {
				//mark window pixels
				fifo->fetcher[0].fetched |= 0x44444444;
			}
		}
		//fall through
	case 5:
		if (!fetch_obj) {
			fifo->flags |= GB_PIXEL_FIFO_FETCH_READY;
		}
		break;
	case 6:
		if (fetch_obj) {
			r_io_fd_read_at (gb->io, gb->ppu->vram_fd, fifo->fetcher[1].addr,
				&fifo->fetcher[1].data[1], 1);
			ut8 *p = &fifo->fetcher[1].data[0];
			if (fifo->obj & GB_FIFO_OAM_FLIP_X) {
				fifo->fetcher[1].fetched =
					((p[1] & 0x80) >> 6) | ((p[0] & 0x80) >> 7) |
					((p[1] & 0x40) >> 1) | ((p[0] & 0x40) >> 2) |
					((p[1] & 0x20) << 4) | ((p[0] & 0x20) << 3) |
					((p[1] & 0x10) << 9) | ((p[0] & 0x10) << 8) |
					((p[1] & 0x8) << 14) | ((p[0] & 0x8) << 13) |
					((p[1] & 0x4) << 19) | ((p[0] & 0x4) << 18) |
					((p[1] & 0x2) << 24) | ((p[0] & 0x2) << 23) |
					((p[1] & 0x1) << 29) | ((p[0] & 0x1) << 28);
			} else {
				fifo->fetcher[1].fetched =
					((p[1] & 0x80) << 22) | ((p[0] & 0x80) << 21) |
					((p[1] & 0x40) << 19) | ((p[0] & 0x40) << 18) |
					((p[1] & 0x20) << 16) | ((p[0] & 0x20) << 15) |
					((p[1] & 0x10) << 13) | ((p[0] & 0x10) << 12) |
					((p[1] & 0x8) << 10) | ((p[0] & 0x8) << 9) |
					((p[1] & 0x4) << 7) | ((p[0] & 0x4) << 6) |
					((p[1] & 0x2) << 4) | ((p[0] & 0x2) << 3) |
					((p[1] & 0x1) << 1) | (p[0] & 0x1);
			}
			//mark as obj pixels and select the palette
			fifo->fetcher[1].fetched |= (fifo->obj & GB_FIFO_OAM_PALLET)?
				0xcccccccc: 0x88888888;
			if ((fifo->obj & 0xff) < 8) { //is this correct?
				fifo->fetcher[1].fetched <<= ((8 - (fifo->obj & 0xff)) << 2);
			}
		}
		break;
	case 7:
		if (fetch_obj) {
			fifo->flags |= GB_PIXEL_FIFO_FETCH_READY;
		}
	}
	fifo->fetcher[fetch_obj].state_ctr = (fifo->fetcher[fetch_obj].state_ctr + 1) & 0x7;
}

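//returns true and latches the matching oam entry into fifo.obj
//when an object starts at the current fifo x position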
static bool check_objects (GBPPU *ppu) {
	if (!ppu->fifo.x) {
		return false;
	}
	ut32 i;
	for (i = 0; i < ppu->ost.n_entries; i++) {
		const ut8 x = ppu->ost.data[i] & 0xff;
		//x cannot be 0, because oam entries with x = 0 don't end up in this array;
		//because of this, setting x to 0 can be used to invalidate entries
		if (x && (x == ppu->fifo.x)) {
			ppu->fifo.obj = ppu->ost.data[i];
#if 0
			ppu->ost.data[i] &= 0xffffff00;
#else
			ppu->ost.data[i] = 0;
#endif
			return true;
		}
	}
	return false;
}

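//pop the topmost fifo pixel, resolve its color via BGP/OBP0/OBP1 and
//write it to the pixel buffer; the low bits of fifo.dx count the pixels
//that are still to be discarded for SCX fine scrolling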
static void shift_out_pixel (GBPPU *ppu) {
	ppu->fifo.shift_out = (ppu->fifo.shift_out & 0xf0) | ppu->fifo.data >> 60;
	ppu->fifo.data = ppu->fifo.data << 4;
	ppu->fifo.n_fpixel--;
	if (ppu->fifo.dx & 0x7) {
		//discard pixel
		ppu->fifo.dx = (ppu->fifo.dx & 0xf8) | ((ppu->fifo.dx & 0x7) - 1);
		return;
	}
	ut8 color;
	//bg palette
	if (!(ppu->fifo.shift_out & 0x08)) {
		//check if window and bg are enabled
		if (ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_BGW_ENABLE) {
			color = (ppu->buf[GB_PPU_BGP] & (0x3 << ((ppu->fifo.shift_out & 0x3) << 1))) >>
				((ppu->fifo.shift_out & 0x3) << 1);
		} else {
			//white
			color = 0;
		}
	} else {
		//obj palette
		const ut8 pal = !!(ppu->fifo.shift_out & 0x04);
		color = (ppu->buf[GB_PPU_OBP0 + pal] & (0x3 << ((ppu->fifo.shift_out & 0x3) << 1))) >>
			((ppu->fifo.shift_out & 0x3) << 1);
	}
#if 0
	if (ppu->fifo.x > 7) { //is this correct?
		gb_pix_buf_set_pixel (ppu->pixbuf, ppu->fifo.x, ppu->buf[GB_PPU_LY], color);
	}
#endif
	gb_pix_buf_set_pixel (ppu->pixbuf, ppu->fifo.x, ppu->buf[GB_PPU_LY], color);
	ppu->fifo.x++;
}

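//entering mode 3: lock vram and reset the pixel fifo;
//376 cycles is the combined budget of mode 3 and the following hblank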
static void gb_ppu_render_launch (GB *gb) {
	RIOMap *vram = r_io_map_get (gb->io, gb->ppu->vram_mapid);
	vram->perm = 0;	//disable vram access for rendering
	gb->ppu->fifo = (const PixelFifo){0};
	gb->ppu->fifo.remaining_cycles = 376;
}

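//mode 3: run fetcher and fifo until the line is complete (fifo.x > 159)
//or the given cycle budget is used up; returns the unused cycles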
static ut32 gb_ppu_render_continue (GB *gb, ut32 cycles) {
	PixelFifo *fifo = &gb->ppu->fifo;
	if (R_UNLIKELY (!(fifo->flags & GB_PIXEL_FIFO_REG_DATA_LOADED))) {
		fifo->dx = gb->ppu->buf[GB_PPU_SCX] & 0x7;
		fifo->wy = gb->ppu->buf[GB_PPU_WY];
		fifo->flags |= GB_PIXEL_FIFO_REG_DATA_LOADED;
	}
	while (cycles) {
#if 1
		if (fifo->flags & GB_PIXEL_FIFO_FETCH_WINDOW) {
			//this is probably insufficient, but the actual behaviour of
			//deactivating the window mid-fetch is not really documented
			if (!(gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_WIN_ENABLE)) {
				fifo->flags ^= GB_PIXEL_FIFO_FETCH_WINDOW;
			}
		} else {
			if (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_WIN_ENABLE) {
				if ((fifo->wy <= gb->ppu->buf[GB_PPU_LY]) &&
					((fifo->flags1 & GB_PIXEL_FIFO_WXC_TRIGGERED) ||
					(gb->ppu->buf[GB_PPU_WX] >= fifo->x))) {
					//when the window starts at 0, we don't need to discard pixels anyway
					fifo->flags1 = GB_PIXEL_FIFO_WXC_TRIGGERED;
					fifo->flags |= GB_PIXEL_FIFO_FETCH_WINDOW;
					//clear the fetcher
					fifo->fetcher[0] = (const PixelFifoFetcher) {0};
					//clear the pipe
					fifo->n_fpixel = 0;
				}
			}
			//What should happen when the window gets enabled while an obj is being fetched?
		}
#endif
		if (fifo->n_fpixel < 9) {
			//initial phase on each line
			//also when window starts?
			gb_pixel_fifo_fetch_continue (gb);
			if (fifo->flags & GB_PIXEL_FIFO_FETCH_READY) {
				if (!fifo->n_fpixel) {
					fifo->data = ((ut64)fifo->fetcher[0].fetched) << 32;
				} else {
					fifo->data |= fifo->fetcher[0].fetched;
				}
				fifo->n_fpixel += 8;
				//I suspect the initial 16 pixel fetch on each line only takes
				//12 cycles instead of 16, because the pixel fifo is empty
				fifo->flags ^= GB_PIXEL_FIFO_FETCH_READY;
				fifo->fetcher[0].state_ctr = 0;
			}
		} else {
			if (!(fifo->flags & GB_PIXEL_FIFO_FETCH_OBJECT)) {
				if (check_objects (gb->ppu)) {
					fifo->flags |= GB_PIXEL_FIFO_FETCH_OBJECT;
				} else {
					shift_out_pixel (gb->ppu);
					if (fifo->x > 159) {
						//line is complete, enter hblank
						gb->ppu->buf[GB_PPU_STAT] &= ~GB_PPU_STAT_MODE_MASK;
						gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_HBLANK;
						fifo->remaining_cycles--;
						return cycles - 1;
					}
				}
				gb_pixel_fifo_fetch_continue (gb);
				if (fifo->n_fpixel < 9 && (fifo->flags & GB_PIXEL_FIFO_FETCH_READY)) {
					fifo->data |= fifo->fetcher[0].fetched;
					fifo->n_fpixel += 8;
					fifo->flags ^= GB_PIXEL_FIFO_FETCH_READY;
				}
			} else {
				gb_pixel_fifo_fetch_continue (gb);
				if (fifo->flags & GB_PIXEL_FIFO_FETCH_READY) {
					if (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_OBJ_ENABLE) {
						gb_ppu_pixel_fifo_merge_opixels (fifo, !!(fifo->obj & GB_FIFO_OAM_PRIORITY));
					}
					fifo->flags ^= GB_PIXEL_FIFO_FETCH_OBJECT | GB_PIXEL_FIFO_FETCH_READY;
				}
			}
		}
		fifo->remaining_cycles--;
		cycles--;
	}
	return 0;
}

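//mode 0 (hblank): burn the cycles left over from mode 3, then advance LY
//and switch to vblank (LY == 144) or the next line's oam scan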
static ut32 gb_ppu_hblank_continue (GB *gb, ut32 cycles) {
	if (cycles >= gb->ppu->fifo.remaining_cycles) {
		gb->ppu->buf[GB_PPU_LY]++;
		gb->ppu->buf[GB_PPU_STAT] &= ~GB_PPU_STAT_MODE_MASK;
		if (gb->ppu->buf[GB_PPU_LY] == 144) {
			//launch vblank
			gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_VBLANK;
			return cycles - gb->ppu->fifo.remaining_cycles;
		}
		//launch oam scan
		gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_OAM_SCAN;
		return cycles - gb->ppu->fifo.remaining_cycles;
	}
	gb->ppu->fifo.remaining_cycles -= cycles;
	return 0;
}

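//mode 1 (vblank): ten lines of 456 cycles each (4560 in total);
//LY keeps counting up until the frame wraps back to oam scan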
static ut32 gb_ppu_vblank_continue (GB *gb, ut32 cycles) {
	const ut32 m = gb->ppu->fifo.remaining_cycles % 456;
	if (m && m < cycles) {
		gb->ppu->buf[GB_PPU_LY]++;
		if (gb->ppu->buf[GB_PPU_LY] == 156) {
			gb->ppu->buf[GB_PPU_STAT] &= ~GB_PPU_STAT_MODE_MASK;
			gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_OAM_SCAN;
			return cycles - gb->ppu->fifo.remaining_cycles;
		}
	}
	gb->ppu->fifo.remaining_cycles -= cycles;
	return 0;
}

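//main PPU entry point: handles LCD on/off switches, drives the mode state
//machine for the given number of cycles and raises STAT/VBLANK interrupts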
void gb_ppu_continue (GB *gb, ut32 cycles) {
	if (gb->ppu->fifo.flags1 & GB_PIXEL_FIFO_LCDC_SWITCH) {
		gb->ppu->fifo.flags1 ^= GB_PIXEL_FIFO_LCDC_SWITCH;
		RIOMap *map = r_io_map_get (gb->io, gb->dma.oam_mapid);
		if (gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_ENABLE) {
			//lcd was switched on
			//launch oam scan
			gb->ppu->buf[GB_PPU_LY] = 0;
			//disable oam access
			map->perm = 0;
			gb->ppu->buf[GB_PPU_STAT] &= ~GB_PPU_STAT_MODE_MASK;
			gb->ppu->buf[GB_PPU_STAT] |= GB_PPU_STAT_MODE_OAM_SCAN;
			if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_OAM) {
				gb_interrupts_request (gb, GB_INTERRUPT_STAT);
			}
		} else {
			//lcd was switched off
			//enable oam and vram access
			map->perm = R_PERM_RWX;
			map = r_io_map_get (gb->io, gb->ppu->vram_mapid);
			map->perm = R_PERM_RWX;
			//clear screen
			gb_pix_buf_clear (gb->ppu->pixbuf);
			return;
		}
	}
	if (!(gb->ppu->buf[GB_PPU_LCDC] & GB_PPU_LCDC_ENABLE)) {
		return;
	}
	if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_LYC) {
		if (gb->ppu->buf[GB_PPU_LY] == gb->ppu->buf[GB_PPU_LYC]) {
			gb_interrupts_request (gb, GB_INTERRUPT_STAT);
		}
	}
	while (cycles) {
		switch (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) {
		case GB_PPU_STAT_MODE_OAM_SCAN:
			cycles = gb_ppu_oam_scan_continue (gb, cycles);
			if ((gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) == GB_PPU_STAT_MODE_RENDER) {
				gb_ppu_render_launch (gb);
			}
			break;
		case GB_PPU_STAT_MODE_RENDER:
			cycles = gb_ppu_render_continue (gb, cycles);
			if ((gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) == GB_PPU_STAT_MODE_HBLANK) {
				RIOMap *map = r_io_map_get (gb->io, gb->dma.oam_mapid);
				map->perm = R_PERM_RWX;
				map = r_io_map_get (gb->io, gb->ppu->vram_mapid);
				map->perm = R_PERM_RWX;
			}
			break;
		case GB_PPU_STAT_MODE_HBLANK:
			cycles = gb_ppu_hblank_continue (gb, cycles);
			if ((gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) == GB_PPU_STAT_MODE_OAM_SCAN) {
				RIOMap *oam = r_io_map_get (gb->io, gb->dma.oam_mapid);
				oam->perm = 0;
				if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_OAM) {
					gb_interrupts_request (gb, GB_INTERRUPT_STAT);
				}
			} else if ((gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) == GB_PPU_STAT_MODE_VBLANK) {
				if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_VBLANK) {
					gb_interrupts_request (gb, GB_INTERRUPT_STAT);
				}
				gb_interrupts_request (gb, GB_INTERRUPT_VBLANK);
				gb->ppu->fifo.remaining_cycles = 4560;
			}
			break;
		case GB_PPU_STAT_MODE_VBLANK:
			cycles = gb_ppu_vblank_continue (gb, cycles);
			if ((gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_MODE_MASK) == GB_PPU_STAT_MODE_OAM_SCAN) {
				gb->ppu->buf[GB_PPU_LY] = 0;
				RIOMap *oam = r_io_map_get (gb->io, gb->dma.oam_mapid);
				oam->perm = 0;
				if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_OAM) {
					gb_interrupts_request (gb, GB_INTERRUPT_STAT);
				}
			}
			break;
		}
	}
	if (gb->ppu->buf[GB_PPU_STAT] & GB_PPU_STAT_INTR_LYC) {
		if (gb->ppu->buf[GB_PPU_LY] == gb->ppu->buf[GB_PPU_LYC]) {
			gb_interrupts_request (gb, GB_INTERRUPT_STAT);
		}
	}
}

void gb_ppu_close (GBPPU *ppu, RIO *io) {
	r_io_fd_close (io, ppu->vram_fd);
	r_io_fd_close (io, ppu->reg_fd);
	gb_pix_buf_free (ppu->pixbuf);
	free (ppu);
}