From: Mark W. <ma...@so...> - 2025-02-25 16:52:24
https://sourceware.org/git/gitweb.cgi?p=valgrind.git;h=98e075fc8976bedfee0861225f8c79e2062c484f commit 98e075fc8976bedfee0861225f8c79e2062c484f Author: Petr Pavlu <pet...@da...> Date: Tue Apr 11 19:30:42 2023 +0000 riscv64: Add initial support: new port-specific VEX files The following people contributed to the initial RISC-V support: Petr Pavlu <pet...@da...> Xeonacid <h.d...@gm...> laokz <la...@fo...> Chelsea E. Manning <me...@xy...> zhaomingxin <zha...@al...> Jojo R <rj...@li...> Some integration fixes were added by Mark Wielaard <ma...@kl...> - Handle Ity_I1, Iex.Const (boolean) https://bugs.kde.org/show_bug.cgi?id=468575 Diff: --- VEX/priv/guest_riscv64_defs.h | 136 ++ VEX/priv/guest_riscv64_helpers.c | 481 ++++++ VEX/priv/guest_riscv64_toIR.c | 3511 ++++++++++++++++++++++++++++++++++++++ VEX/priv/host_riscv64_defs.c | 2696 +++++++++++++++++++++++++++++ VEX/priv/host_riscv64_defs.h | 644 +++++++ VEX/priv/host_riscv64_isel.c | 2097 +++++++++++++++++++++++ VEX/pub/libvex_guest_riscv64.h | 148 ++ 7 files changed, 9713 insertions(+) diff --git a/VEX/priv/guest_riscv64_defs.h b/VEX/priv/guest_riscv64_defs.h new file mode 100644 index 0000000000..ee5435e145 --- /dev/null +++ b/VEX/priv/guest_riscv64_defs.h @@ -0,0 +1,136 @@ + +/*--------------------------------------------------------------------*/ +/*--- begin guest_riscv64_defs.h ---*/ +/*--------------------------------------------------------------------*/ + +/* + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2020-2023 Petr Pavlu + pet...@da... + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see <http://www.gnu.org/licenses/>. + + The GNU General Public License is contained in the file COPYING. + + Neither the names of the U.S. Department of Energy nor the + University of California nor the names of its contributors may be + used to endorse or promote products derived from this software + without prior written permission. +*/ + +/* Only to be used within the guest_riscv64_* files. */ + +#ifndef __VEX_GUEST_RISCV64_DEFS_H +#define __VEX_GUEST_RISCV64_DEFS_H + +#include "libvex_basictypes.h" + +#include "guest_generic_bb_to_IR.h" + +/*------------------------------------------------------------*/ +/*--- riscv64 to IR conversion ---*/ +/*------------------------------------------------------------*/ + +/* Convert one riscv64 insn to IR. See the type DisOneInstrFn in + guest_generic_bb_to_IR.h. */ +DisResult disInstr_RISCV64(IRSB* irbb, + const UChar* guest_code, + Long delta, + Addr guest_IP, + VexArch guest_arch, + const VexArchInfo* archinfo, + const VexAbiInfo* abiinfo, + VexEndness host_endness, + Bool sigill_diag); + +/* Used by the optimiser to specialise calls to helpers. */ +IRExpr* guest_riscv64_spechelper(const HChar* function_name, + IRExpr** args, + IRStmt** precedingStmts, + Int n_precedingStmts); + +/* Describes to the optimiser which part of the guest state require precise + memory exceptions. 
This is logically part of the guest state description. */ +Bool guest_riscv64_state_requires_precise_mem_exns( + Int minoff, Int maxoff, VexRegisterUpdates pxControl); + +extern VexGuestLayout riscv64guest_layout; + +/*------------------------------------------------------------*/ +/*--- riscv64 guest helpers ---*/ +/*------------------------------------------------------------*/ + +/* --- CLEAN HELPERS --- */ + +/* Calculate resulting flags of a specified floating-point operation. Returns + a 32-bit value where bits 4:0 contain the fflags in the RISC-V native + format (NV DZ OF UF NX) and remaining upper bits are zero. */ +UInt riscv64g_calculate_fflags_fsqrt_s(Float a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_w_s(Float a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_wu_s(Float a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_s_w(UInt a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_s_wu(UInt a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_l_s(Float a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_lu_s(Float a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_s_l(ULong a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_s_lu(ULong a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fsqrt_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_s_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_w_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_wu_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_l_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_lu_d(Double a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_d_l(ULong a1, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fcvt_d_lu(ULong a1, UInt rm_RISCV); + +UInt riscv64g_calculate_fflags_fadd_s(Float a1, Float a2, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fmul_s(Float a1, Float a2, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fdiv_s(Float a1, Float a2, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fadd_d(Double a1, Double a2, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fmul_d(Double a1, Double a2, UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fdiv_d(Double a1, Double a2, UInt rm_RISCV); + +UInt riscv64g_calculate_fflags_fmin_s(Float a1, Float a2); +UInt riscv64g_calculate_fflags_fmax_s(Float a1, Float a2); +UInt riscv64g_calculate_fflags_feq_s(Float a1, Float a2); +UInt riscv64g_calculate_fflags_flt_s(Float a1, Float a2); +UInt riscv64g_calculate_fflags_fle_s(Float a1, Float a2); +UInt riscv64g_calculate_fflags_fmin_d(Double a1, Double a2); +UInt riscv64g_calculate_fflags_fmax_d(Double a1, Double a2); +UInt riscv64g_calculate_fflags_feq_d(Double a1, Double a2); +UInt riscv64g_calculate_fflags_flt_d(Double a1, Double a2); +UInt riscv64g_calculate_fflags_fle_d(Double a1, Double a2); + +UInt riscv64g_calculate_fflags_fmadd_s(Float a1, + Float a2, + Float a3, + UInt rm_RISCV); +UInt riscv64g_calculate_fflags_fmadd_d(Double a1, + Double a2, + Double a3, + UInt rm_RISCV); + +/* Calculate floating-point class. Returns a 64-bit value where bits 9:0 + contains the properties in the RISC-V FCLASS-instruction format and remaining + upper bits are zero. 
*/ +ULong riscv64g_calculate_fclass_s(Float a1); +ULong riscv64g_calculate_fclass_d(Double a1); + +#endif /* ndef __VEX_GUEST_RISCV64_DEFS_H */ + +/*--------------------------------------------------------------------*/ +/*--- end guest_riscv64_defs.h ---*/ +/*--------------------------------------------------------------------*/ diff --git a/VEX/priv/guest_riscv64_helpers.c b/VEX/priv/guest_riscv64_helpers.c new file mode 100644 index 0000000000..e7c4ed8055 --- /dev/null +++ b/VEX/priv/guest_riscv64_helpers.c @@ -0,0 +1,481 @@ + +/*--------------------------------------------------------------------*/ +/*--- begin guest_riscv64_helpers.c ---*/ +/*--------------------------------------------------------------------*/ + +/* + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2020-2023 Petr Pavlu + pet...@da... + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see <http://www.gnu.org/licenses/>. + + The GNU General Public License is contained in the file COPYING. +*/ + +#include "libvex_guest_riscv64.h" + +#include "guest_riscv64_defs.h" +#include "main_util.h" + +/* This file contains helper functions for riscv64 guest code. Calls to these + functions are generated by the back end. These calls are of course in the + host machine code and this file will be compiled to host machine code, so + that all makes sense. + + Only change the signatures of these helper functions very carefully. If you + change the signature here, you'll have to change the parameters passed to it + in the IR calls constructed by guest_riscv64_toIR.c. + + The convention used is that all functions called from generated code are + named riscv64g_<something>, and any function whose name lacks that prefix is + not called from generated code. Note that some LibVEX_* functions can however + be called by VEX's client, but that is not the same as calling them from + VEX-generated code. 
+*/ + +#if defined(__riscv) && (__riscv_xlen == 64) +/* clang-format off */ +#define CALCULATE_FFLAGS_UNARY64_F(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, %[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " ft0, %[a1]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1), [rm] "r"(rm_RISCV) \ + : "t0", "ft0"); \ + return res; \ + } while (0) +#define CALCULATE_FFLAGS_UNARY64_IF(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, %[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " t1, %[a1]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1), [rm] "r"(rm_RISCV) \ + : "t0", "t1"); \ + return res; \ + } while (0) +#define CALCULATE_FFLAGS_UNARY64_FI(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, %[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " ft0, %[a1]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "r"(a1), [rm] "r"(rm_RISCV) \ + : "t0", "ft0"); \ + return res; \ + } while (0) +/* clang-format on */ +#else +/* No simulated version is currently implemented. */ +#define CALCULATE_FFLAGS_UNARY64_F(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#define CALCULATE_FFLAGS_UNARY64_IF(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#define CALCULATE_FFLAGS_UNARY64_FI(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#endif + +/* CALLED FROM GENERATED CODE: CLEAN HELPERS */ +UInt riscv64g_calculate_fflags_fsqrt_s(Float a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_F("fsqrt.s"); +} +UInt riscv64g_calculate_fflags_fcvt_w_s(Float a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.w.s"); +} +UInt riscv64g_calculate_fflags_fcvt_wu_s(Float a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.wu.s"); +} +UInt riscv64g_calculate_fflags_fcvt_s_w(UInt a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_FI("fcvt.s.w"); +} +UInt riscv64g_calculate_fflags_fcvt_s_wu(UInt a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_FI("fcvt.s.wu"); +} +UInt riscv64g_calculate_fflags_fcvt_l_s(Float a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.l.s"); +} +UInt riscv64g_calculate_fflags_fcvt_lu_s(Float a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.lu.s"); +} +UInt riscv64g_calculate_fflags_fcvt_s_l(ULong a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_FI("fcvt.s.l"); +} +UInt riscv64g_calculate_fflags_fcvt_s_lu(ULong a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_FI("fcvt.s.lu"); +} +UInt riscv64g_calculate_fflags_fsqrt_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_F("fsqrt.d"); +} +UInt riscv64g_calculate_fflags_fcvt_s_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_F("fcvt.s.d"); +} +UInt riscv64g_calculate_fflags_fcvt_w_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.w.d"); +} +UInt riscv64g_calculate_fflags_fcvt_wu_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.wu.d"); +} +UInt riscv64g_calculate_fflags_fcvt_l_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.l.d"); +} +UInt riscv64g_calculate_fflags_fcvt_lu_d(Double a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_IF("fcvt.lu.d"); +} +UInt riscv64g_calculate_fflags_fcvt_d_l(ULong a1, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_UNARY64_FI("fcvt.d.l"); +} +UInt riscv64g_calculate_fflags_fcvt_d_lu(ULong a1, UInt rm_RISCV) +{ + 
CALCULATE_FFLAGS_UNARY64_FI("fcvt.d.lu"); +} + +#if defined(__riscv) && (__riscv_xlen == 64) +/* clang-format off */ +#define CALCULATE_FFLAGS_BINARY64(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, %[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " %[a1], %[a1], %[a2]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1), [a2] "f"(a2), [rm] "r"(rm_RISCV) \ + : "t0"); \ + return res; \ + } while (0) +#define CALCULATE_FFLAGS_BINARY64_IFF(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, %[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " t1, %[a1], %[a2]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1), [a2] "f"(a2), [rm] "r"(rm_RISCV) \ + : "t0", "t1"); \ + return res; \ + } while (0) +/* clang-format on */ +#else +/* No simulated version is currently implemented. */ +#define CALCULATE_FFLAGS_BINARY64(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#define CALCULATE_FFLAGS_BINARY64_IFF(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#endif + +/* CALLED FROM GENERATED CODE: CLEAN HELPERS */ +UInt riscv64g_calculate_fflags_fadd_s(Float a1, Float a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fadd.s"); +} +UInt riscv64g_calculate_fflags_fmul_s(Float a1, Float a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fmul.s"); +} +UInt riscv64g_calculate_fflags_fdiv_s(Float a1, Float a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fdiv.s"); +} +UInt riscv64g_calculate_fflags_fadd_d(Double a1, Double a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fadd.d"); +} +UInt riscv64g_calculate_fflags_fmul_d(Double a1, Double a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fmul.d"); +} +UInt riscv64g_calculate_fflags_fdiv_d(Double a1, Double a2, UInt rm_RISCV) +{ + CALCULATE_FFLAGS_BINARY64("fdiv.d"); +} +UInt riscv64g_calculate_fflags_fmin_s(Float a1, Float a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64("fmin.s"); +} +UInt riscv64g_calculate_fflags_fmax_s(Float a1, Float a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64("fmax.s"); +} +UInt riscv64g_calculate_fflags_feq_s(Float a1, Float a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("feq.s"); +} +UInt riscv64g_calculate_fflags_flt_s(Float a1, Float a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("flt.s"); +} +UInt riscv64g_calculate_fflags_fle_s(Float a1, Float a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("fle.s"); +} +UInt riscv64g_calculate_fflags_fmin_d(Double a1, Double a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64("fmin.d"); +} +UInt riscv64g_calculate_fflags_fmax_d(Double a1, Double a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64("fmax.d"); +} +UInt riscv64g_calculate_fflags_feq_d(Double a1, Double a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("feq.d"); +} +UInt riscv64g_calculate_fflags_flt_d(Double a1, Double a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("flt.d"); +} +UInt riscv64g_calculate_fflags_fle_d(Double a1, Double a2) +{ + UInt rm_RISCV = 0; /* unused */ + CALCULATE_FFLAGS_BINARY64_IFF("fle.d"); +} + +#if defined(__riscv) && (__riscv_xlen == 64) +/* clang-format off */ +#define CALCULATE_FFLAGS_TERNARY64(inst) \ + do { \ + UInt res; \ + __asm__ __volatile__( \ + "csrr t0, fcsr\n\t" \ + "csrw frm, 
%[rm]\n\t" \ + "csrw fflags, zero\n\t" \ + inst " %[a1], %[a1], %[a2], %[a3]\n\t" \ + "csrr %[res], fflags\n\t" \ + "csrw fcsr, t0\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1), [a2] "f"(a2), [a3] "f"(a3), [rm] "r"(rm_RISCV) \ + : "t0"); \ + return res; \ + } while (0) +/* clang-format on */ +#else +/* No simulated version is currently implemented. */ +#define CALCULATE_FFLAGS_TERNARY64(inst) \ + do { \ + (void)rm_RISCV; \ + return 0; \ + } while (0) +#endif + +/* CALLED FROM GENERATED CODE: CLEAN HELPERS */ +UInt riscv64g_calculate_fflags_fmadd_s(Float a1, + Float a2, + Float a3, + UInt rm_RISCV) +{ + CALCULATE_FFLAGS_TERNARY64("fmadd.s"); +} +UInt riscv64g_calculate_fflags_fmadd_d(Double a1, + Double a2, + Double a3, + UInt rm_RISCV) +{ + CALCULATE_FFLAGS_TERNARY64("fmadd.d"); +} + +#if defined(__riscv) && (__riscv_xlen == 64) +/* clang-format off */ +#define CALCULATE_FCLASS(inst) \ + do { \ + ULong res; \ + __asm__ __volatile__( \ + inst " %[res], %[a1]\n\t" \ + : [res] "=r"(res) \ + : [a1] "f"(a1)); \ + return res; \ + } while (0) +/* clang-format on */ +#else +/* No simulated version is currently implemented. */ +#define CALCULATE_FCLASS(inst) \ + do { \ + return 0; \ + } while (0) +#endif + +/* CALLED FROM GENERATED CODE: CLEAN HELPERS */ +ULong riscv64g_calculate_fclass_s(Float a1) { CALCULATE_FCLASS("fclass.s"); } +ULong riscv64g_calculate_fclass_d(Double a1) { CALCULATE_FCLASS("fclass.d"); } + +/*------------------------------------------------------------*/ +/*--- Flag-helpers translation-time function specialisers. ---*/ +/*--- These help iropt specialise calls the above run-time ---*/ +/*--- flags functions. ---*/ +/*------------------------------------------------------------*/ + +IRExpr* guest_riscv64_spechelper(const HChar* function_name, + IRExpr** args, + IRStmt** precedingStmts, + Int n_precedingStmts) +{ + return NULL; +} + +/*------------------------------------------------------------*/ +/*--- Helpers for dealing with, and describing, guest ---*/ +/*--- state as a whole. ---*/ +/*------------------------------------------------------------*/ + +/* Initialise the entire riscv64 guest state. */ +/* VISIBLE TO LIBVEX CLIENT */ +void LibVEX_GuestRISCV64_initialise(/*OUT*/ VexGuestRISCV64State* vex_state) +{ + vex_bzero(vex_state, sizeof(*vex_state)); +} + +/* Figure out if any part of the guest state contained in minoff .. maxoff + requires precise memory exceptions. If in doubt return True (but this + generates significantly slower code). + + By default we enforce precise exns for guest x2 (sp), x8 (fp) and pc only. + These are the minimum needed to extract correct stack backtraces from riscv64 + code. + + Only x2 (sp) is needed in mode VexRegUpdSpAtMemAccess. +*/ +Bool guest_riscv64_state_requires_precise_mem_exns(Int minoff, + Int maxoff, + VexRegisterUpdates pxControl) +{ + Int fp_min = offsetof(VexGuestRISCV64State, guest_x8); + Int fp_max = fp_min + 8 - 1; + Int sp_min = offsetof(VexGuestRISCV64State, guest_x2); + Int sp_max = sp_min + 8 - 1; + Int pc_min = offsetof(VexGuestRISCV64State, guest_pc); + Int pc_max = pc_min + 8 - 1; + + if (maxoff < sp_min || minoff > sp_max) { + /* No overlap with sp. */ + if (pxControl == VexRegUpdSpAtMemAccess) + return False; /* We only need to check stack pointer. */ + } else + return True; + + if (maxoff < fp_min || minoff > fp_max) { + /* No overlap with fp. */ + } else + return True; + + if (maxoff < pc_min || minoff > pc_max) { + /* No overlap with pc. 
*/ + } else + return True; + + return False; +} + +#define ALWAYSDEFD(field) \ + { \ + offsetof(VexGuestRISCV64State, field), \ + (sizeof((VexGuestRISCV64State*)0)->field) \ + } + +VexGuestLayout riscv64guest_layout = { + /* Total size of the guest state, in bytes. */ + .total_sizeB = sizeof(VexGuestRISCV64State), + + /* Describe the stack pointer. */ + .offset_SP = offsetof(VexGuestRISCV64State, guest_x2), + .sizeof_SP = 8, + + /* Describe the frame pointer. */ + .offset_FP = offsetof(VexGuestRISCV64State, guest_x8), + .sizeof_FP = 8, + + /* Describe the instruction pointer. */ + .offset_IP = offsetof(VexGuestRISCV64State, guest_pc), + .sizeof_IP = 8, + + /* Describe any sections to be regarded by Memcheck as 'always-defined'. */ + .n_alwaysDefd = 6, + + .alwaysDefd = { + /* 0 */ ALWAYSDEFD(guest_x0), + /* 1 */ ALWAYSDEFD(guest_pc), + /* 2 */ ALWAYSDEFD(guest_EMNOTE), + /* 3 */ ALWAYSDEFD(guest_CMSTART), + /* 4 */ ALWAYSDEFD(guest_CMLEN), + /* 5 */ ALWAYSDEFD(guest_NRADDR), + }, +}; + +/*--------------------------------------------------------------------*/ +/*--- end guest_riscv64_helpers.c ---*/ +/*--------------------------------------------------------------------*/ diff --git a/VEX/priv/guest_riscv64_toIR.c b/VEX/priv/guest_riscv64_toIR.c new file mode 100644 index 0000000000..93ea5a173d --- /dev/null +++ b/VEX/priv/guest_riscv64_toIR.c @@ -0,0 +1,3511 @@ + +/*--------------------------------------------------------------------*/ +/*--- begin guest_riscv64_toIR.c ---*/ +/*--------------------------------------------------------------------*/ + +/* + This file is part of Valgrind, a dynamic binary instrumentation + framework. + + Copyright (C) 2020-2023 Petr Pavlu + pet...@da... + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see <http://www.gnu.org/licenses/>. + + The GNU General Public License is contained in the file COPYING. +*/ + +/* Translates riscv64 code to IR. */ + +/* "Special" instructions. + + This instruction decoder can decode four special instructions which mean + nothing natively (are no-ops as far as regs/mem are concerned) but have + meaning for supporting Valgrind. A special instruction is flagged by + a 16-byte preamble: + + 00305013 00d05013 03305013 03d05013 + (srli zero, zero, 3; srli zero, zero, 13 + srli zero, zero, 51; srli zero, zero, 61) + + Following that, one of the following 4 are allowed (standard interpretation + in parentheses): + + 00a56533 (or a0, a0, a0) a3 = client_request ( a4 ) + 00b5e5b3 (or a1, a1, a1) a3 = guest_NRADDR + 00c66633 (or a2, a2, a2) branch-and-link-to-noredir t0 + 00d6e6b3 (or a3, a3, a3) IR injection + + Any other bytes following the 16-byte preamble are illegal and constitute + a failure in instruction decoding. This all assumes that the preamble will + never occur except in specific code fragments designed for Valgrind to catch. 
+*/ + +#include "libvex_guest_riscv64.h" + +#include "guest_riscv64_defs.h" +#include "main_globals.h" +#include "main_util.h" + +/*------------------------------------------------------------*/ +/*--- Debugging output ---*/ +/*------------------------------------------------------------*/ + +#define DIP(format, args...) \ + do { \ + if (vex_traceflags & VEX_TRACE_FE) \ + vex_printf(format, ##args); \ + } while (0) + +#define DIS(buf, format, args...) \ + do { \ + if (vex_traceflags & VEX_TRACE_FE) \ + vex_sprintf(buf, format, ##args); \ + } while (0) + +/*------------------------------------------------------------*/ +/*--- Helper bits and pieces for deconstructing the ---*/ +/*--- riscv64 insn stream. ---*/ +/*------------------------------------------------------------*/ + +/* Do a little-endian load of a 32-bit word, regardless of the endianness of the + underlying host. */ +static inline UInt getUIntLittleEndianly(const UChar* p) +{ + UInt w = 0; + w = (w << 8) | p[3]; + w = (w << 8) | p[2]; + w = (w << 8) | p[1]; + w = (w << 8) | p[0]; + return w; +} + +/* Do read of an instruction, which can be 16-bit (compressed) or 32-bit in + size. */ +static inline UInt getInsn(const UChar* p) +{ + Bool is_compressed = (p[0] & 0x3) != 0x3; + UInt w = 0; + if (!is_compressed) { + w = (w << 8) | p[3]; + w = (w << 8) | p[2]; + } + w = (w << 8) | p[1]; + w = (w << 8) | p[0]; + return w; +} + +/* Produce _uint[_bMax:_bMin]. */ +#define SLICE_UInt(_uint, _bMax, _bMin) \ + ((((UInt)(_uint)) >> (_bMin)) & \ + (UInt)((1ULL << ((_bMax) - (_bMin) + 1)) - 1ULL)) + +/*------------------------------------------------------------*/ +/*--- Helpers for constructing IR. ---*/ +/*------------------------------------------------------------*/ + +/* Create an expression to produce a 64-bit constant. */ +static IRExpr* mkU64(ULong i) { return IRExpr_Const(IRConst_U64(i)); } + +/* Create an expression to produce a 32-bit constant. */ +static IRExpr* mkU32(UInt i) { return IRExpr_Const(IRConst_U32(i)); } + +/* Create an expression to produce an 8-bit constant. */ +static IRExpr* mkU8(UInt i) +{ + vassert(i < 256); + return IRExpr_Const(IRConst_U8((UChar)i)); +} + +/* Create an expression to read a temporary. */ +static IRExpr* mkexpr(IRTemp tmp) { return IRExpr_RdTmp(tmp); } + +/* Create an unary-operation expression. */ +static IRExpr* unop(IROp op, IRExpr* a) { return IRExpr_Unop(op, a); } + +/* Create a binary-operation expression. */ +static IRExpr* binop(IROp op, IRExpr* a1, IRExpr* a2) +{ + return IRExpr_Binop(op, a1, a2); +} + +/* Create a ternary-operation expression. */ +static IRExpr* triop(IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3) +{ + return IRExpr_Triop(op, a1, a2, a3); +} + +/* Create a quaternary-operation expression. */ +static IRExpr* qop(IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3, IRExpr* a4) +{ + return IRExpr_Qop(op, a1, a2, a3, a4); +} + +/* Create an expression to load a value from memory (in the little-endian + order). */ +static IRExpr* loadLE(IRType ty, IRExpr* addr) +{ + return IRExpr_Load(Iend_LE, ty, addr); +} + +/* Add a statement to the list held by irsb. */ +static void stmt(/*MOD*/ IRSB* irsb, IRStmt* st) { addStmtToIRSB(irsb, st); } + +/* Add a statement to assign a value to a temporary. */ +static void assign(/*MOD*/ IRSB* irsb, IRTemp dst, IRExpr* e) +{ + stmt(irsb, IRStmt_WrTmp(dst, e)); +} + +/* Generate a statement to store a value in memory (in the little-endian + order). 
*/ +static void storeLE(/*MOD*/ IRSB* irsb, IRExpr* addr, IRExpr* data) +{ + stmt(irsb, IRStmt_Store(Iend_LE, addr, data)); +} + +/* Generate a new temporary of the given type. */ +static IRTemp newTemp(/*MOD*/ IRSB* irsb, IRType ty) +{ + vassert(isPlausibleIRType(ty)); + return newIRTemp(irsb->tyenv, ty); +} + +/* Sign-extend a 32/64-bit integer expression to 64 bits. */ +static IRExpr* widenSto64(IRType srcTy, IRExpr* e) +{ + switch (srcTy) { + case Ity_I64: + return e; + case Ity_I32: + return unop(Iop_32Sto64, e); + default: + vpanic("widenSto64(riscv64)"); + } +} + +/* Narrow a 64-bit integer expression to 32/64 bits. */ +static IRExpr* narrowFrom64(IRType dstTy, IRExpr* e) +{ + switch (dstTy) { + case Ity_I64: + return e; + case Ity_I32: + return unop(Iop_64to32, e); + default: + vpanic("narrowFrom64(riscv64)"); + } +} + +/*------------------------------------------------------------*/ +/*--- Offsets of various parts of the riscv64 guest state ---*/ +/*------------------------------------------------------------*/ + +#define OFFB_X0 offsetof(VexGuestRISCV64State, guest_x0) +#define OFFB_X1 offsetof(VexGuestRISCV64State, guest_x1) +#define OFFB_X2 offsetof(VexGuestRISCV64State, guest_x2) +#define OFFB_X3 offsetof(VexGuestRISCV64State, guest_x3) +#define OFFB_X4 offsetof(VexGuestRISCV64State, guest_x4) +#define OFFB_X5 offsetof(VexGuestRISCV64State, guest_x5) +#define OFFB_X6 offsetof(VexGuestRISCV64State, guest_x6) +#define OFFB_X7 offsetof(VexGuestRISCV64State, guest_x7) +#define OFFB_X8 offsetof(VexGuestRISCV64State, guest_x8) +#define OFFB_X9 offsetof(VexGuestRISCV64State, guest_x9) +#define OFFB_X10 offsetof(VexGuestRISCV64State, guest_x10) +#define OFFB_X11 offsetof(VexGuestRISCV64State, guest_x11) +#define OFFB_X12 offsetof(VexGuestRISCV64State, guest_x12) +#define OFFB_X13 offsetof(VexGuestRISCV64State, guest_x13) +#define OFFB_X14 offsetof(VexGuestRISCV64State, guest_x14) +#define OFFB_X15 offsetof(VexGuestRISCV64State, guest_x15) +#define OFFB_X16 offsetof(VexGuestRISCV64State, guest_x16) +#define OFFB_X17 offsetof(VexGuestRISCV64State, guest_x17) +#define OFFB_X18 offsetof(VexGuestRISCV64State, guest_x18) +#define OFFB_X19 offsetof(VexGuestRISCV64State, guest_x19) +#define OFFB_X20 offsetof(VexGuestRISCV64State, guest_x20) +#define OFFB_X21 offsetof(VexGuestRISCV64State, guest_x21) +#define OFFB_X22 offsetof(VexGuestRISCV64State, guest_x22) +#define OFFB_X23 offsetof(VexGuestRISCV64State, guest_x23) +#define OFFB_X24 offsetof(VexGuestRISCV64State, guest_x24) +#define OFFB_X25 offsetof(VexGuestRISCV64State, guest_x25) +#define OFFB_X26 offsetof(VexGuestRISCV64State, guest_x26) +#define OFFB_X27 offsetof(VexGuestRISCV64State, guest_x27) +#define OFFB_X28 offsetof(VexGuestRISCV64State, guest_x28) +#define OFFB_X29 offsetof(VexGuestRISCV64State, guest_x29) +#define OFFB_X30 offsetof(VexGuestRISCV64State, guest_x30) +#define OFFB_X31 offsetof(VexGuestRISCV64State, guest_x31) +#define OFFB_PC offsetof(VexGuestRISCV64State, guest_pc) + +#define OFFB_F0 offsetof(VexGuestRISCV64State, guest_f0) +#define OFFB_F1 offsetof(VexGuestRISCV64State, guest_f1) +#define OFFB_F2 offsetof(VexGuestRISCV64State, guest_f2) +#define OFFB_F3 offsetof(VexGuestRISCV64State, guest_f3) +#define OFFB_F4 offsetof(VexGuestRISCV64State, guest_f4) +#define OFFB_F5 offsetof(VexGuestRISCV64State, guest_f5) +#define OFFB_F6 offsetof(VexGuestRISCV64State, guest_f6) +#define OFFB_F7 offsetof(VexGuestRISCV64State, guest_f7) +#define OFFB_F8 offsetof(VexGuestRISCV64State, guest_f8) +#define OFFB_F9 
offsetof(VexGuestRISCV64State, guest_f9) +#define OFFB_F10 offsetof(VexGuestRISCV64State, guest_f10) +#define OFFB_F11 offsetof(VexGuestRISCV64State, guest_f11) +#define OFFB_F12 offsetof(VexGuestRISCV64State, guest_f12) +#define OFFB_F13 offsetof(VexGuestRISCV64State, guest_f13) +#define OFFB_F14 offsetof(VexGuestRISCV64State, guest_f14) +#define OFFB_F15 offsetof(VexGuestRISCV64State, guest_f15) +#define OFFB_F16 offsetof(VexGuestRISCV64State, guest_f16) +#define OFFB_F17 offsetof(VexGuestRISCV64State, guest_f17) +#define OFFB_F18 offsetof(VexGuestRISCV64State, guest_f18) +#define OFFB_F19 offsetof(VexGuestRISCV64State, guest_f19) +#define OFFB_F20 offsetof(VexGuestRISCV64State, guest_f20) +#define OFFB_F21 offsetof(VexGuestRISCV64State, guest_f21) +#define OFFB_F22 offsetof(VexGuestRISCV64State, guest_f22) +#define OFFB_F23 offsetof(VexGuestRISCV64State, guest_f23) +#define OFFB_F24 offsetof(VexGuestRISCV64State, guest_f24) +#define OFFB_F25 offsetof(VexGuestRISCV64State, guest_f25) +#define OFFB_F26 offsetof(VexGuestRISCV64State, guest_f26) +#define OFFB_F27 offsetof(VexGuestRISCV64State, guest_f27) +#define OFFB_F28 offsetof(VexGuestRISCV64State, guest_f28) +#define OFFB_F29 offsetof(VexGuestRISCV64State, guest_f29) +#define OFFB_F30 offsetof(VexGuestRISCV64State, guest_f30) +#define OFFB_F31 offsetof(VexGuestRISCV64State, guest_f31) +#define OFFB_FCSR offsetof(VexGuestRISCV64State, guest_fcsr) + +#define OFFB_EMNOTE offsetof(VexGuestRISCV64State, guest_EMNOTE) +#define OFFB_CMSTART offsetof(VexGuestRISCV64State, guest_CMSTART) +#define OFFB_CMLEN offsetof(VexGuestRISCV64State, guest_CMLEN) +#define OFFB_NRADDR offsetof(VexGuestRISCV64State, guest_NRADDR) + +#define OFFB_LLSC_SIZE offsetof(VexGuestRISCV64State, guest_LLSC_SIZE) +#define OFFB_LLSC_ADDR offsetof(VexGuestRISCV64State, guest_LLSC_ADDR) +#define OFFB_LLSC_DATA offsetof(VexGuestRISCV64State, guest_LLSC_DATA) + +/*------------------------------------------------------------*/ +/*--- Integer registers ---*/ +/*------------------------------------------------------------*/ + +static Int offsetIReg64(UInt iregNo) +{ + switch (iregNo) { + case 0: + return OFFB_X0; + case 1: + return OFFB_X1; + case 2: + return OFFB_X2; + case 3: + return OFFB_X3; + case 4: + return OFFB_X4; + case 5: + return OFFB_X5; + case 6: + return OFFB_X6; + case 7: + return OFFB_X7; + case 8: + return OFFB_X8; + case 9: + return OFFB_X9; + case 10: + return OFFB_X10; + case 11: + return OFFB_X11; + case 12: + return OFFB_X12; + case 13: + return OFFB_X13; + case 14: + return OFFB_X14; + case 15: + return OFFB_X15; + case 16: + return OFFB_X16; + case 17: + return OFFB_X17; + case 18: + return OFFB_X18; + case 19: + return OFFB_X19; + case 20: + return OFFB_X20; + case 21: + return OFFB_X21; + case 22: + return OFFB_X22; + case 23: + return OFFB_X23; + case 24: + return OFFB_X24; + case 25: + return OFFB_X25; + case 26: + return OFFB_X26; + case 27: + return OFFB_X27; + case 28: + return OFFB_X28; + case 29: + return OFFB_X29; + case 30: + return OFFB_X30; + case 31: + return OFFB_X31; + default: + vassert(0); + } +} + +/* Obtain ABI name of a register. */ +static const HChar* nameIReg(UInt iregNo) +{ + vassert(iregNo < 32); + static const HChar* names[32] = { + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "s0", "s1", "a0", + "a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5", + "s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"}; + return names[iregNo]; +} + +/* Read a 64-bit value from a guest integer register. 
*/ +static IRExpr* getIReg64(UInt iregNo) +{ + vassert(iregNo < 32); + return IRExpr_Get(offsetIReg64(iregNo), Ity_I64); +} + +/* Write a 64-bit value into a guest integer register. */ +static void putIReg64(/*OUT*/ IRSB* irsb, UInt iregNo, /*IN*/ IRExpr* e) +{ + vassert(iregNo > 0 && iregNo < 32); + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64); + stmt(irsb, IRStmt_Put(offsetIReg64(iregNo), e)); +} + +/* Read a 32-bit value from a guest integer register. */ +static IRExpr* getIReg32(UInt iregNo) +{ + vassert(iregNo < 32); + return unop(Iop_64to32, IRExpr_Get(offsetIReg64(iregNo), Ity_I64)); +} + +/* Write a 32-bit value into a guest integer register. */ +static void putIReg32(/*OUT*/ IRSB* irsb, UInt iregNo, /*IN*/ IRExpr* e) +{ + vassert(iregNo > 0 && iregNo < 32); + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32); + stmt(irsb, IRStmt_Put(offsetIReg64(iregNo), unop(Iop_32Sto64, e))); +} + +/* Write an address into the guest pc. */ +static void putPC(/*OUT*/ IRSB* irsb, /*IN*/ IRExpr* e) +{ + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64); + stmt(irsb, IRStmt_Put(OFFB_PC, e)); +} + +/*------------------------------------------------------------*/ +/*--- Floating-point registers ---*/ +/*------------------------------------------------------------*/ + +static Int offsetFReg(UInt fregNo) +{ + switch (fregNo) { + case 0: + return OFFB_F0; + case 1: + return OFFB_F1; + case 2: + return OFFB_F2; + case 3: + return OFFB_F3; + case 4: + return OFFB_F4; + case 5: + return OFFB_F5; + case 6: + return OFFB_F6; + case 7: + return OFFB_F7; + case 8: + return OFFB_F8; + case 9: + return OFFB_F9; + case 10: + return OFFB_F10; + case 11: + return OFFB_F11; + case 12: + return OFFB_F12; + case 13: + return OFFB_F13; + case 14: + return OFFB_F14; + case 15: + return OFFB_F15; + case 16: + return OFFB_F16; + case 17: + return OFFB_F17; + case 18: + return OFFB_F18; + case 19: + return OFFB_F19; + case 20: + return OFFB_F20; + case 21: + return OFFB_F21; + case 22: + return OFFB_F22; + case 23: + return OFFB_F23; + case 24: + return OFFB_F24; + case 25: + return OFFB_F25; + case 26: + return OFFB_F26; + case 27: + return OFFB_F27; + case 28: + return OFFB_F28; + case 29: + return OFFB_F29; + case 30: + return OFFB_F30; + case 31: + return OFFB_F31; + default: + vassert(0); + } +} + +/* Obtain ABI name of a register. */ +static const HChar* nameFReg(UInt fregNo) +{ + vassert(fregNo < 32); + static const HChar* names[32] = { + "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"}; + return names[fregNo]; +} + +/* Read a 64-bit value from a guest floating-point register. */ +static IRExpr* getFReg64(UInt fregNo) +{ + vassert(fregNo < 32); + return IRExpr_Get(offsetFReg(fregNo), Ity_F64); +} + +/* Write a 64-bit value into a guest floating-point register. */ +static void putFReg64(/*OUT*/ IRSB* irsb, UInt fregNo, /*IN*/ IRExpr* e) +{ + vassert(fregNo < 32); + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64); + stmt(irsb, IRStmt_Put(offsetFReg(fregNo), e)); +} + +/* Read a 32-bit value from a guest floating-point register. */ +static IRExpr* getFReg32(UInt fregNo) +{ + vassert(fregNo < 32); + /* Note that the following access depends on the host being little-endian + which is checked in disInstr_RISCV64(). */ + /* TODO Check that the value is correctly NaN-boxed. 
If not then return + the 32-bit canonical qNaN, as mandated by the RISC-V ISA. */ + return IRExpr_Get(offsetFReg(fregNo), Ity_F32); +} + +/* Write a 32-bit value into a guest floating-point register. */ +static void putFReg32(/*OUT*/ IRSB* irsb, UInt fregNo, /*IN*/ IRExpr* e) +{ + vassert(fregNo < 32); + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F32); + /* Note that the following access depends on the host being little-endian + which is checked in disInstr_RISCV64(). */ + Int offset = offsetFReg(fregNo); + stmt(irsb, IRStmt_Put(offset, e)); + /* Write 1's in the upper bits of the target 64-bit register to create + a NaN-boxed value, as mandated by the RISC-V ISA. */ + stmt(irsb, IRStmt_Put(offset + 4, mkU32(0xffffffff))); + /* TODO Check that this works with Memcheck. */ +} + +/* Read a 32-bit value from the fcsr. */ +static IRExpr* getFCSR(void) { return IRExpr_Get(OFFB_FCSR, Ity_I32); } + +/* Write a 32-bit value into the fcsr. */ +static void putFCSR(/*OUT*/ IRSB* irsb, /*IN*/ IRExpr* e) +{ + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32); + stmt(irsb, IRStmt_Put(OFFB_FCSR, e)); +} + +/* Accumulate exception flags in fcsr. */ +static void accumulateFFLAGS(/*OUT*/ IRSB* irsb, /*IN*/ IRExpr* e) +{ + vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32); + putFCSR(irsb, binop(Iop_Or32, getFCSR(), binop(Iop_And32, e, mkU32(0x1f)))); +} + +/* Generate IR to get hold of the rounding mode in both RISC-V and IR + formats. A floating-point operation can use either a static rounding mode + encoded in the instruction, or a dynamic rounding mode held in fcsr. Bind the + final result to the passed temporaries (which are allocated by the function). + */ +static void mk_get_rounding_mode(/*MOD*/ IRSB* irsb, + /*OUT*/ IRTemp* rm_RISCV, + /*OUT*/ IRTemp* rm_IR, + UInt inst_rm_RISCV) +{ + /* + rounding mode | RISC-V | IR + -------------------------------------------- + to nearest, ties to even | 000 | 0000 + to zero | 001 | 0011 + to +infinity | 010 | 0010 + to -infinity | 011 | 0001 + to nearest, ties away from 0 | 100 | 0100 + invalid | 101 | 1000 + invalid | 110 | 1000 + dynamic | 111 | 1000 + + The 'dynamic' value selects the mode from fcsr. Its value is valid when + encoded in the instruction but naturally invalid when found in fcsr. + + Static mode is known at the decode time and can be directly expressed by + a respective rounding mode IR constant. + + Dynamic mode requires a runtime mapping from the RISC-V to the IR mode. 
+ It can be implemented using the following transformation: + t0 = fcsr_rm_RISCV - 20 + t1 = t0 >> 2 + t2 = fcsr_rm_RISCV + 3 + t3 = t2 ^ 3 + rm_IR = t1 & t3 + */ + *rm_RISCV = newTemp(irsb, Ity_I32); + *rm_IR = newTemp(irsb, Ity_I32); + switch (inst_rm_RISCV) { + case 0b000: + assign(irsb, *rm_RISCV, mkU32(0)); + assign(irsb, *rm_IR, mkU32(Irrm_NEAREST)); + break; + case 0b001: + assign(irsb, *rm_RISCV, mkU32(1)); + assign(irsb, *rm_IR, mkU32(Irrm_ZERO)); + break; + case 0b010: + assign(irsb, *rm_RISCV, mkU32(2)); + assign(irsb, *rm_IR, mkU32(Irrm_PosINF)); + break; + case 0b011: + assign(irsb, *rm_RISCV, mkU32(3)); + assign(irsb, *rm_IR, mkU32(Irrm_NegINF)); + break; + case 0b100: + assign(irsb, *rm_RISCV, mkU32(4)); + assign(irsb, *rm_IR, mkU32(Irrm_NEAREST_TIE_AWAY_0)); + break; + case 0b101: + assign(irsb, *rm_RISCV, mkU32(5)); + assign(irsb, *rm_IR, mkU32(Irrm_INVALID)); + break; + case 0b110: + assign(irsb, *rm_RISCV, mkU32(6)); + assign(irsb, *rm_IR, mkU32(Irrm_INVALID)); + break; + case 0b111: { + assign(irsb, *rm_RISCV, + binop(Iop_And32, binop(Iop_Shr32, getFCSR(), mkU8(5)), mkU32(7))); + IRTemp t0 = newTemp(irsb, Ity_I32); + assign(irsb, t0, binop(Iop_Sub32, mkexpr(*rm_RISCV), mkU32(20))); + IRTemp t1 = newTemp(irsb, Ity_I32); + assign(irsb, t1, binop(Iop_Shr32, mkexpr(t0), mkU8(2))); + IRTemp t2 = newTemp(irsb, Ity_I32); + assign(irsb, t2, binop(Iop_Add32, mkexpr(*rm_RISCV), mkU32(3))); + IRTemp t3 = newTemp(irsb, Ity_I32); + assign(irsb, t3, binop(Iop_Xor32, mkexpr(t2), mkU32(3))); + assign(irsb, *rm_IR, binop(Iop_And32, mkexpr(t1), mkexpr(t3))); + break; + } + default: + vassert(0); + } +} + +/*------------------------------------------------------------*/ +/*--- Name helpers ---*/ +/*------------------------------------------------------------*/ + +/* Obtain an acquire/release atomic-instruction suffix. */ +static const HChar* nameAqRlSuffix(UInt aqrl) +{ + switch (aqrl) { + case 0b00: + return ""; + case 0b01: + return ".rl"; + case 0b10: + return ".aq"; + case 0b11: + return ".aqrl"; + default: + vpanic("nameAqRlSuffix(riscv64)"); + } +} + +/* Obtain a control/status register name. */ +static const HChar* nameCSR(UInt csr) +{ + switch (csr) { + case 0x001: + return "fflags"; + case 0x002: + return "frm"; + case 0x003: + return "fcsr"; + default: + vpanic("nameCSR(riscv64)"); + } +} + +/* Obtain a floating-point rounding-mode operand string. */ +static const HChar* nameRMOperand(UInt rm) +{ + switch (rm) { + case 0b000: + return ", rne"; + case 0b001: + return ", rtz"; + case 0b010: + return ", rdn"; + case 0b011: + return ", rup"; + case 0b100: + return ", rmm"; + case 0b101: + return ", <invalid>"; + case 0b110: + return ", <invalid>"; + case 0b111: + return ""; /* dyn */ + default: + vpanic("nameRMOperand(riscv64)"); + } +} + +/*------------------------------------------------------------*/ +/*--- Disassemble a single instruction ---*/ +/*------------------------------------------------------------*/ + +/* A macro to fish bits out of 'insn' which is a local variable to all + disassembly functions. 
*/ +#define INSN(_bMax, _bMin) SLICE_UInt(insn, (_bMax), (_bMin)) + +static Bool dis_RV64C(/*MB_OUT*/ DisResult* dres, + /*OUT*/ IRSB* irsb, + UInt insn, + Addr guest_pc_curr_instr, + Bool sigill_diag) +{ + vassert(INSN(1, 0) == 0b00 || INSN(1, 0) == 0b01 || INSN(1, 0) == 0b10); + + /* ---- RV64C compressed instruction set, quadrant 0 ----- */ + + /* ------------- c.addi4spn rd, nzuimm[9:2] -------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b000) { + UInt rd = INSN(4, 2) + 8; + UInt nzuimm9_2 = + INSN(10, 7) << 4 | INSN(12, 11) << 2 | INSN(5, 5) << 1 | INSN(6, 6); + if (nzuimm9_2 == 0) { + /* Invalid C.ADDI4SPN, fall through. */ + } else { + ULong uimm = nzuimm9_2 << 2; + putIReg64(irsb, rd, + binop(Iop_Add64, getIReg64(2 /*x2/sp*/), mkU64(uimm))); + DIP("c.addi4spn %s, %llu\n", nameIReg(rd), uimm); + return True; + } + } + + /* -------------- c.fld rd, uimm[7:3](rs1) --------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b001) { + UInt rd = INSN(4, 2) + 8; + UInt rs1 = INSN(9, 7) + 8; + UInt uimm7_3 = INSN(6, 5) << 3 | INSN(12, 10); + ULong uimm = uimm7_3 << 3; + putFReg64(irsb, rd, + loadLE(Ity_F64, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm)))); + DIP("c.fld %s, %llu(%s)\n", nameFReg(rd), uimm, nameIReg(rs1)); + return True; + } + + /* --------------- c.lw rd, uimm[6:2](rs1) --------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b010) { + UInt rd = INSN(4, 2) + 8; + UInt rs1 = INSN(9, 7) + 8; + UInt uimm6_2 = INSN(5, 5) << 4 | INSN(12, 10) << 1 | INSN(6, 6); + ULong uimm = uimm6_2 << 2; + putIReg64( + irsb, rd, + unop(Iop_32Sto64, + loadLE(Ity_I32, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm))))); + DIP("c.lw %s, %llu(%s)\n", nameIReg(rd), uimm, nameIReg(rs1)); + return True; + } + + /* --------------- c.ld rd, uimm[7:3](rs1) --------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b011) { + UInt rd = INSN(4, 2) + 8; + UInt rs1 = INSN(9, 7) + 8; + UInt uimm7_3 = INSN(6, 5) << 3 | INSN(12, 10); + ULong uimm = uimm7_3 << 3; + putIReg64(irsb, rd, + loadLE(Ity_I64, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm)))); + DIP("c.ld %s, %llu(%s)\n", nameIReg(rd), uimm, nameIReg(rs1)); + return True; + } + + /* -------------- c.fsd rs2, uimm[7:3](rs1) -------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b101) { + UInt rs1 = INSN(9, 7) + 8; + UInt rs2 = INSN(4, 2) + 8; + UInt uimm7_3 = INSN(6, 5) << 3 | INSN(12, 10); + ULong uimm = uimm7_3 << 3; + storeLE(irsb, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm)), + getFReg64(rs2)); + DIP("c.fsd %s, %llu(%s)\n", nameFReg(rs2), uimm, nameIReg(rs1)); + return True; + } + + /* -------------- c.sw rs2, uimm[6:2](rs1) --------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b110) { + UInt rs1 = INSN(9, 7) + 8; + UInt rs2 = INSN(4, 2) + 8; + UInt uimm6_2 = INSN(5, 5) << 4 | INSN(12, 10) << 1 | INSN(6, 6); + ULong uimm = uimm6_2 << 2; + storeLE(irsb, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm)), + unop(Iop_64to32, getIReg64(rs2))); + DIP("c.sw %s, %llu(%s)\n", nameIReg(rs2), uimm, nameIReg(rs1)); + return True; + } + + /* -------------- c.sd rs2, uimm[7:3](rs1) --------------- */ + if (INSN(1, 0) == 0b00 && INSN(15, 13) == 0b111) { + UInt rs1 = INSN(9, 7) + 8; + UInt rs2 = INSN(4, 2) + 8; + UInt uimm7_3 = INSN(6, 5) << 3 | INSN(12, 10); + ULong uimm = uimm7_3 << 3; + storeLE(irsb, binop(Iop_Add64, getIReg64(rs1), mkU64(uimm)), + getIReg64(rs2)); + DIP("c.sd %s, %llu(%s)\n", nameIReg(rs2), uimm, nameIReg(rs1)); + return True; + } + + /* ---- RV64C compressed instruction set, quadrant 1 ----- */ + + /* 
------------------------ c.nop ------------------------ */ + if (INSN(15, 0) == 0b0000000000000001) { + DIP("c.nop\n"); + return True; + } + + /* -------------- c.addi rd_rs1, nzimm[5:0] -------------- */ + if (INSN(1, 0) == 0b01 && INSN(15, 13) == 0b000) { + UInt rd_rs1 = INSN(11, 7); + UInt nzimm5_0 = INSN(12, 12) << 5 | INSN(6, 2); + if (rd_rs1 == 0 || nzimm5_0 == 0) { + /* Invalid C.ADDI, fall through. */ + } else { + ULong simm = vex_sx_to_64(nzimm5_0, 6); + putIReg64(irsb, rd_rs1, + binop(Iop_Add64, getIReg64(rd_rs1), mkU64(simm))); + DIP("c.addi %s, %lld\n", nameIReg(rd_rs1), (Long)simm); + return True; + } + } + + /* -------------- c.addiw rd_rs1, imm[5:0] --------------- */ + if (INSN(1, 0) == 0b01 && INSN(15, 13) == 0b001) { + UInt rd_rs1 = INSN(11, 7); + UInt imm5_0 = INSN(12, 12) << 5 | INSN(6, 2); + if (rd_rs1 == 0) { + /* Invalid C.ADDIW, fall through. */ + } else { + UInt simm = (UInt)vex_sx_to_64(imm5_0, 6); + putIReg32(irsb, rd_rs1, + binop(Iop_Add32, getIReg32(rd_rs1), mkU32(simm))); + DIP("c.addiw %s, %d\n", nameIReg(rd_rs1), (Int)simm); + return True; + } + } + + /* ------------------ c.li rd, imm[5:0] ------------------ */ + if (INSN(1, 0) == 0b01 && INSN(15, 13) == 0b010) { + UInt rd = INSN(11, 7); + UInt imm5_0 = INSN(12, 12) << 5 | INSN(6, 2); + if (rd == 0) { + /* Invalid C.LI, fall through. */ + } else { + ULong simm = vex_sx_to_64(imm5_0, 6); + putIReg64(irsb, rd, mkU64(simm)); + DIP("c.li %s, %lld\n", nameIReg(rd), (Long)simm); + return True; + } + } + + /* ---------------- c.addi16sp nzimm[9:4] ---------------- */ + if (INSN(1, 0) == 0b01 && INSN(15, 13) == 0b011) { + UInt rd_rs1 = INSN(11, 7); + UInt nzimm9_4 = INSN(12, 12) << 5 | INSN(4, 3) << 3 | INSN(5, 5) << 2 | + INSN(2, 2) << 1 | INSN(6, 6); + if (rd_rs1 != 2 || nzimm9_4 == 0) { + /* Invalid C.ADDI16SP, fall through. */ + } else { + ULong simm = vex_sx_to_64(nzimm9_4 << 4, 10); + putIReg64(irsb, rd_rs1, + binop(Iop_Add64, getIReg64(rd_rs1), mkU64(simm))); + DIP("c.addi16sp %lld\n", (Long)sim... [truncated message content] |