Multiple cleanups and improvements
- Clean up exception trigger code
- Clean up division-by-zero handling
- Clean up SRET code
- Clean up CSR code
- Add interrupts
- Add TIMER interrupt
parent a0935f0aad
commit 02114ea7d8
@@ -3,19 +3,19 @@
 uint32_t csr_read(struct RV32_CPU* cpu, uint32_t csr)
 {
     switch(csr)
     {
-        case CSR_TIME:
+        case CSR_CYCLE:
            return cpu->sim_ticks_done;
            break;
        default:
            break;
    }

    return cpu->csr[csr];
 }

 void csr_write(struct RV32_CPU* cpu, uint32_t csr, uint32_t value)
 {
    cpu->csr[csr] = value;
 }
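The read path above virtualizes the cycle counter from the simulator's tick count while every other CSR falls through to backing storage. A minimal standalone sketch of that behaviour, using a toy struct and stand-in TOY_* constants rather than the emulator's real RV32_CPU definitions (the real CSR numbers 0xC00 for cycle and 0x140 for sscratch are from the RISC-V spec):

#include <assert.h>
#include <stdint.h>

/* Toy model of csr_read(): the cycle CSR is shadowed by the tick counter,
 * everything else is plain storage indexed by CSR number.
 * The struct layout here is illustrative, not the emulator's. */
#define TOY_CSR_CYCLE    0xC00u
#define TOY_CSR_SSCRATCH 0x140u

typedef struct { uint32_t csr[4096]; uint64_t sim_ticks_done; } toy_cpu_t;

static uint32_t toy_csr_read(toy_cpu_t* cpu, uint32_t csr)
{
    if(csr == TOY_CSR_CYCLE)
        return (uint32_t) cpu->sim_ticks_done; /* virtualized counter */
    return cpu->csr[csr];                      /* backing storage */
}

int main(void)
{
    toy_cpu_t cpu = { .sim_ticks_done = 1234 };
    cpu.csr[TOY_CSR_SSCRATCH] = 0xdeadbeef;
    assert(toy_csr_read(&cpu, TOY_CSR_SSCRATCH) == 0xdeadbeef); /* plain CSR round-trips */
    assert(toy_csr_read(&cpu, TOY_CSR_CYCLE) == 1234);          /* cycle count is virtualized */
    return 0;
}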
@@ -2,24 +2,48 @@

 #include <stdio.h>

-void exception_trigger(rv32_cpu_t* cpu, uint32_t scause)
+void exception_trigger(rv32_cpu_t* cpu, uint32_t scause, uint32_t tval)
 {
     // An exception can only be triggered by the CPU itself,
     // so we know we already own the mutex
     // We are in the CPU thread itself, but we need
     // the return of this function to be the beginning of
     // the cpu loop
     // To achieve that, we can just call cpu_loop (noreturn)
     // at the end of this function

-    // Set PC to STVEC, and set SCAUSE
-    // TODO: If PC cannot be mmu_resolved, throw a 'double fault' ?
-    cpu->pc = cpu->csr[CSR_STVEC];
-    cpu->csr[CSR_SCAUSE] = scause;
-
-    pthread_mutex_unlock(&cpu0_mutex);
-    cpu_loop(cpu);
+    // Save execution context, so that 'mret/sret/..' can restore it
+    // Exceptions cannot be disabled
+
+    // Set xCAUSE : exception cause, with interrupt bit set to null
+    cpu->csr[CSR_SCAUSE] = scause;
+
+    // Save previous interrupt enable in xSTATUS.xPIE
+    if(cpu->csr[CSR_SSTATUS] & 0b10U)
+        cpu->csr[CSR_SSTATUS] |= 0x80;
+
+    // Set previous privilege mode in xSTATUS.xPP
+    // TODO
+
+    // Unset SIE (interrupt enable) bit
+    cpu->csr[CSR_SSTATUS] &= ~0b10U;
+
+    // Set privilege mode for exception handling, checking for delegation
+    // TODO
+
+    // Set xTVAL, exception-specific information related to xCAUSE
+    cpu->csr[CSR_STVAL] = tval;
+
+    // Set SEPC to instruction that caused exception
+    cpu->csr[CSR_SEPC] = cpu->pc;
+
+    // Set PC to xTVEC : exception handling code
+    // xTVEC: [Base(30bits) Mode(2 bits)], address 4-byte aligned in base
+    // Exceptions are not vectored (we can safely ignore mode)
+    cpu->pc = cpu->csr[CSR_STVEC] & 0xFFFFFFFC;
+
+    // Unlock cpu mutex, cpu_loop will lock it just after
+    pthread_mutex_unlock(&cpu0_mutex);
+
+    // cpu loop (attribute noreturn should erase previous stack)
+    cpu_loop(cpu);
 }
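Taken together, the new body performs the standard supervisor trap-entry bookkeeping: record the cause in scause, the fault detail in stval, the interrupted pc in sepc, and redirect execution to the stvec base. A self-contained toy model of just those CSR updates (not the emulator's rv32_cpu_t, and leaving out the mutex and sstatus handling) behaves like this:

#include <assert.h>
#include <stdint.h>

/* Stripped-down model of the trap-entry sequence above; field names
 * mirror the CSRs but the struct itself is purely illustrative. */
typedef struct { uint32_t pc, sepc, scause, stval, stvec; } toy_hart_t;

static void toy_trap_entry(toy_hart_t* h, uint32_t scause, uint32_t tval)
{
    h->scause = scause;         /* exception: interrupt bit (31) stays clear */
    h->stval  = tval;           /* e.g. the faulting virtual address */
    h->sepc   = h->pc;          /* so SRET can come back to the faulting instruction */
    h->pc     = h->stvec & ~3u; /* exceptions always use the direct (non-vectored) base */
}

int main(void)
{
    toy_hart_t h = { .pc = 0x1000, .stvec = 0x80000000 };
    toy_trap_entry(&h, 0xD /* load page fault */, 0xdeadbeef);
    assert(h.sepc == 0x1000 && h.pc == 0x80000000);
    assert(h.scause == 0xD && h.stval == 0xdeadbeef);
    return 0;
}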
@@ -3,7 +3,7 @@

 #include "rv32cpu.h"

-void exception_trigger(rv32_cpu_t* cpu, uint32_t scause);
+void exception_trigger(rv32_cpu_t* cpu, uint32_t scause, uint32_t tval);

 #define SCAUSE_INSTRUCTION_MISSALIGNED 0x0
 #define SCAUSE_INSTRUCTION_ACCESS_FAULT 0x1
src/cpu/interrupt.c (new file, 87 lines)
@@ -0,0 +1,87 @@
#include "interrupt.h"
|
||||
#include "rv32cpu.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
|
||||
void interrupt_trigger(rv32_cpu_t* cpu, uint32_t scause)
|
||||
{
|
||||
// Make sure that interrupts are enabled
|
||||
if(cpu->privilege_mode == MACHINE)
|
||||
{
|
||||
// In machine mode, we check mstatus.sie (bit 1)
|
||||
if(!(cpu->csr[CSR_MSTATUS] & 0b10))
|
||||
return;
|
||||
}
|
||||
else if(cpu->privilege_mode == SUPERVISOR)
|
||||
{
|
||||
// In supervisor mode, we check sstatus.sie (bit 1)
|
||||
if(!(cpu->csr[CSR_SSTATUS] & 0b10))
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
// TODO
|
||||
fprintf(stderr, "interrupt_trigger in non M/S-mode not implemented yet\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
// An interrupt can only be triggered from outside
|
||||
// of the cpu, so we are on a different thread
|
||||
// and we don't already own the CPU mutex
|
||||
// We obtain in this thread the control of the CPU,
|
||||
// but we know it is not in the middle of an instruction
|
||||
// (we got the mutex) ; this way we can just change
|
||||
// registers to set interrupt handler execution
|
||||
pthread_mutex_lock(&cpu0_mutex);
|
||||
|
||||
// Set xCAUSE with interrupt bit set
|
||||
cpu->csr[CSR_SCAUSE] = 0x80000000 | scause;
|
||||
|
||||
// Set xSTATUS.xPIE (previous interrupt enable) bit
|
||||
cpu->csr[CSR_SSTATUS] |= 0x80;
|
||||
|
||||
// Set xSTATUS.xPP (Previous Privilege) bit
|
||||
|
||||
// Unset xSTATUS.xIE (interrupt enable) bit
|
||||
cpu->csr[CSR_SSTATUS] &= ~0b10U;
|
||||
|
||||
// Set xEPC : PC at interruption
|
||||
cpu->csr[CSR_SEPC] = cpu->pc;
|
||||
|
||||
// Set PC to xTVEC : exception handler code
|
||||
// xTVEC: [Base(30bits) Mode(2 bits)], address 4-byte aligned in base
|
||||
// Interrupts can be vectored, if mode == 1, then pc = xTVEC + scause * 4
|
||||
int mode = cpu->csr[CSR_STVEC] & 0b11;
|
||||
switch(mode)
|
||||
{
|
||||
case 0:
|
||||
cpu->pc = cpu->csr[CSR_STVEC] & 0xFFFFFFFC;
|
||||
break;
|
||||
case 1:
|
||||
cpu->pc = (cpu->csr[CSR_STVEC] & 0xFFFFFFFC) + scause * 4;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "interrupt_trigger: invalid mode encountered in sTVEC register\n");
|
||||
exit(EXIT_FAILURE);
|
||||
break;
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&cpu0_mutex);
|
||||
}
|
||||
|
||||
void interrupt_timer_thread()
|
||||
{
|
||||
while(1)
|
||||
{
|
||||
cpu0->csr[CSR_TIME]++;
|
||||
interrupt_trigger(cpu0, SCAUSE_SUPERVISOR_TIMER_INTERRUPT);
|
||||
usleep(1);
|
||||
}
|
||||
}
|
||||
|
||||
void interrupt_timer_setup()
|
||||
{
|
||||
pthread_t timer_thread;
|
||||
pthread_create(&timer_thread, 0, (void*) interrupt_timer_thread, 0);
|
||||
}
|
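The stvec dispatch in interrupt_trigger() follows the privileged-spec layout: the handler base lives in bits [31:2] and the mode in bits [1:0]; in vectored mode an interrupt with cause N lands at base + 4*N, while exceptions always use the base. A standalone sketch of just that address computation (not the emulator's code path):

#include <assert.h>
#include <stdint.h>

/* stvec = [base(30 bits) | mode(2 bits)], base 4-byte aligned.
 * mode 0: direct, every trap jumps to base.
 * mode 1: vectored, an interrupt with cause N jumps to base + 4*N. */
static uint32_t trap_target(uint32_t stvec, uint32_t cause, int is_interrupt)
{
    uint32_t base = stvec & 0xFFFFFFFCu;
    if((stvec & 0x3u) == 1 && is_interrupt)
        return base + 4u * cause;
    return base;
}

int main(void)
{
    /* Supervisor timer interrupt (cause 5) with a vectored stvec */
    assert(trap_target(0x80001001u, 5, 1) == 0x80001014u);
    /* Exceptions go to the base even when stvec is vectored */
    assert(trap_target(0x80001001u, 2, 0) == 0x80001000u);
    return 0;
}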
src/cpu/interrupt.h (new file, 15 lines)
@@ -0,0 +1,15 @@
#ifndef INTERRUPT_H
#define INTERRUPT_H

#include <stdint.h>

#include "rv32cpu.h"

#define SCAUSE_SUPERVISOR_SOFTWARE_INTERRUPT 0x1
#define SCAUSE_SUPERVISOR_TIMER_INTERRUPT 0x5
#define SCAUSE_SUPERVISOR_EXTERNAL_INTERRUPT 0x9

void interrupt_trigger(rv32_cpu_t* cpu, uint32_t scause);
void interrupt_timer_setup();

#endif
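These cause codes land in the low bits of scause, with bit 31 marking the trap as an interrupt rather than an exception, which is why interrupt_trigger() ORs in 0x80000000. A small check of that encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Timer interrupt as written by interrupt_trigger(): interrupt bit | cause 5 */
    uint32_t scause = 0x80000000u | 0x5u;
    assert(scause == 0x80000005u);
    assert(scause >> 31);                   /* it is an interrupt... */
    assert((scause & 0x7FFFFFFFu) == 0x5u); /* ...with cause code 5 */
    return 0;
}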
@@ -49,7 +49,7 @@ void cpu_init()
     cpu0->privilege_mode = MACHINE;
 }

-static void cpu_decode(raw_instruction_t raw_instruction, instruction_t* output)
+static void cpu_decode(rv32_cpu_t* cpu, raw_instruction_t raw_instruction, instruction_t* output)
 {
     output->opcode = raw_instruction.opcode;
     output->immediate = 0;
@@ -112,8 +112,8 @@ static void cpu_decode(raw_instruction_t raw_instruction, instruction_t* output)
             // TODO : Decode NOP instructions
             break;
         default:
-            fprintf(stderr, "Error: Unknown instruction opcode 0x%x, could not decode\n", raw_instruction.opcode);
-            exit(EXIT_FAILURE);
+            // Throw an 'Invalid OPCODE' exception
+            exception_trigger(cpu, SCAUSE_ILLEGAL_INSTRUCTION, 0);
             break;
     }
 }
@@ -433,6 +433,13 @@ static void cpu_execute(rv32_cpu_t* cpu, instruction_t* instruction)
                     cpu->regs.x[instruction->rd] = cpu->regs.x[instruction->rs1] ^ cpu->regs.x[instruction->rs2];
                     break;
                 case FUNC7_DIV:
+                    if(!cpu->regs.x[instruction->rs2])
+                    {
+                        // The ISA dictates that division by zero returns 0xFFFFFFFF (all bits set)
+                        // and does not trap or raise a flag
+                        cpu->regs.x[instruction->rd] = 0xFFFFFFFF;
+                        break;
+                    }
                     cpu->regs.x[instruction->rd] = ((int32_t) cpu->regs.x[instruction->rs1]) / ((int32_t) cpu->regs.x[instruction->rs2]);
                     break;
                 default:
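For reference, RV32M defines two corner cases for DIV, neither of which traps: division by zero yields all bits set (-1), and the signed overflow INT32_MIN / -1 yields INT32_MIN. The hunk above handles the zero divisor; the overflow case is included here for completeness. A standalone sketch, separate from the emulator's register file:

#include <assert.h>
#include <stdint.h>

/* DIV semantics per the RV32M spec: no traps, fixed results for the
 * two special cases. Plain C helper, not the emulator's code path. */
static uint32_t rv32_div(uint32_t rs1, uint32_t rs2)
{
    if(rs2 == 0)
        return 0xFFFFFFFFu;                  /* divide by zero -> all ones */
    if(rs1 == 0x80000000u && rs2 == 0xFFFFFFFFu)
        return 0x80000000u;                  /* INT32_MIN / -1 -> INT32_MIN */
    return (uint32_t)((int32_t) rs1 / (int32_t) rs2);
}

int main(void)
{
    assert(rv32_div(10, 0) == 0xFFFFFFFFu);
    assert(rv32_div(0x80000000u, 0xFFFFFFFFu) == 0x80000000u);
    assert(rv32_div((uint32_t) -20, 4) == (uint32_t) -5);
    return 0;
}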
@@ -518,15 +525,30 @@ static void cpu_execute(rv32_cpu_t* cpu, instruction_t* instruction)
                     break;
                 case IMM_EBREAK:
                     // EBREAK : generate a breakpoint exception
-                    exception_trigger(cpu, SCAUSE_BREAKPOINT);
+                    exception_trigger(cpu, SCAUSE_BREAKPOINT, 0);
                     break;
                 case IMM_SRET:
-                    fprintf(stderr, "SRET: We don't support that.\n");
+                    // SRET: Return from supervisor interrupt
+                    // Restore Interrupt Enable from Previous Interrupt Enable
+                    uint32_t sstatus = csr_read(cpu, CSR_SSTATUS);
+                    if(sstatus & 0x80)
+                        csr_write(cpu, CSR_SSTATUS, sstatus | 0b10);
+
+                    // Restore privilege mode from Previous Privilege
+                    // TODO
+
+                    // Set Previous Interrupt Enable to 1
+                    csr_write(cpu, CSR_SSTATUS, csr_read(cpu, CSR_SSTATUS) | 0x80);
+
+                    // Set Previous Privilege to 0
+                    // TODO
+
+                    // Saved PC before interrupt is in CSR SEPC, jump back
+                    cpu->pc = csr_read(cpu, CSR_SEPC) - 4;
                     break;
                 case IMM_MRET:
                     // Ret to destination address : CSR_MEPC content
                     // Change privilege mode to SUPERVISOR
                     // TODO : Pop lower-privilege interrupt enable and privilege mode stack
+                    // TODO fix it to act like sret
                     cpu->privilege_mode = SUPERVISOR;
                     cpu->pc = cpu->csr[CSR_MEPC] - 4;
                     break;
@@ -537,7 +559,9 @@ static void cpu_execute(rv32_cpu_t* cpu, instruction_t* instruction)
                     // TODO : Check if we really need to do something on SFENCE.VMA ?
                     break;
                 case FUNC7_WFI:
-                    fprintf(stderr, "WFI: Guest kernel must think we have interrupts. We have none. Halting simulation.\n");
+                    // Wait For Interrupt : halt the simulation
+                    // It will be woken up by an interrupt
+                    fprintf(stderr, "WFI: Halting simulation.\n");
                     cpu->sim_ticks_left = 1;
                     break;
                 default:
@@ -712,7 +736,7 @@ __attribute__((noreturn)) void cpu_loop(rv32_cpu_t* cpu)

         // Decode
         instruction_t instruction;
-        cpu_decode(raw_instruction, &instruction);
+        cpu_decode(cpu, raw_instruction, &instruction);

         if(trace)
         {
src/main.c
@@ -2,6 +2,7 @@
 #include "memory/memory.h"
 #include "bootloader/bootloader.h"
 #include "cpu/rv32cpu.h"
+#include "cpu/interrupt.h"
 #include "gdbstub/gdbstub.h"
 #include "devices/uart/uart.h"

@@ -31,34 +32,20 @@ int main(int argc, char** argv)
         gdbstub_wait_for_connection();
     }

+    // Initialize timer for timer interrupt
+    interrupt_timer_setup();
+
     // CPU simulation : create cpu0 thread
     if(!gdbstub) cpu0->sim_ticks_left = -1; // Simulate forever
     pthread_t cpu0_thread;
     pthread_create(&cpu0_thread, 0, (void*) cpu_loop, cpu0);

-    // Wait for the simulation to end
+    // Wait forever, until simulation end (which should be an ecall shutdown)
+    pthread_join(cpu0_thread, 0);
     if(gdbstub)
     {
-        pthread_join(cpu0_thread, 0);
         gdbstub_stop();
     }
-    else
-    {
-        while(1)
-        {
-            pthread_mutex_lock(&cpu0_mutex);
-            pthread_cond_wait(&cpu0->sim_condition, &cpu0_mutex);
-            if(!cpu0->sim_ticks_left && cpu0->sim_ticks_done > 0)
-            {
-                // Simulation ended
-                break;
-            }
-            pthread_mutex_unlock(&cpu0_mutex);
-        }
-
-        fprintf(stderr, "Simulation ended (in a non-debug environment)\n");
-        return cpu0->regs.a0;
-    }

     return 0;
 }
@@ -81,7 +81,7 @@ uint32_t mmu_resolve(rv32_cpu_t* cpu, memory_access_type_t access_type, uint32_t vaddr)
     if(!(pte & PTE_V))
     {
         // Invalid PTE
-        exception_trigger(cpu, mmu_scause_from_access(access_type));
+        exception_trigger(cpu, mmu_scause_from_access(access_type), vaddr);
     }

     if((pte & PTE_R) || (pte & PTE_W) || (pte & PTE_X))
@@ -109,7 +109,7 @@ uint32_t mmu_resolve(rv32_cpu_t* cpu, memory_access_type_t access_type, uint32_t vaddr)
     if(!(pte & PTE_V))
     {
         // Invalid PTE
-        exception_trigger(cpu, mmu_scause_from_access(access_type));
+        exception_trigger(cpu, mmu_scause_from_access(access_type), vaddr);
     }

     // This must be a leaf PTE, as Sv32 only supports 2-level mappings