diff --git a/src/main/scala/xiangshan/XSCore.scala b/src/main/scala/xiangshan/XSCore.scala index 0588de4dc66..e28eb467d52 100644 --- a/src/main/scala/xiangshan/XSCore.scala +++ b/src/main/scala/xiangshan/XSCore.scala @@ -210,8 +210,9 @@ trait HasXSParameter { ) val dcacheParameters = DCacheParameters( - tagECC = Some("none"), - dataECC = Some("none"), + tagECC = Some("secded"), + dataECC = Some("secded"), + replacer = Some("setplru"), nMissEntries = 16, nProbeEntries = 16, nReleaseEntries = 16, diff --git a/src/main/scala/xiangshan/backend/issue/ReservationStation.scala b/src/main/scala/xiangshan/backend/issue/ReservationStation.scala index b2dbf09737b..1836430f7fa 100644 --- a/src/main/scala/xiangshan/backend/issue/ReservationStation.scala +++ b/src/main/scala/xiangshan/backend/issue/ReservationStation.scala @@ -212,7 +212,7 @@ class ReservationStationSelect val fastPortsCnt = fastPortsCfg.size val slowPortsCnt = slowPortsCfg.size require(nonBlocked==fastWakeup) - val replayDelay = VecInit(Seq(5, 10, 25, 25).map(_.U(5.W))) + val replayDelay = VecInit(Seq(5, 10, 40, 40).map(_.U(6.W))) val io = IO(new Bundle { val redirect = Flipped(ValidIO(new Redirect)) diff --git a/src/main/scala/xiangshan/cache/DCache.scala b/src/main/scala/xiangshan/cache/DCache.scala index 8cc5e72d782..554ab610829 100644 --- a/src/main/scala/xiangshan/cache/DCache.scala +++ b/src/main/scala/xiangshan/cache/DCache.scala @@ -3,7 +3,7 @@ package xiangshan.cache import chisel3._ import chisel3.util._ import freechips.rocketchip.tilelink.{ClientMetadata, TLClientParameters, TLEdgeOut} -import utils.{Code, RandomReplacement, XSDebug, SRAMTemplate} +import utils.{Code, ReplacementPolicy, XSDebug, SRAMTemplate, ParallelOR} import scala.math.max @@ -15,10 +15,11 @@ case class DCacheParameters ( nSets: Int = 64, nWays: Int = 8, - rowBits: Int = 64, + rowBits: Int = 128, nTLBEntries: Int = 32, tagECC: Option[String] = None, dataECC: Option[String] = None, + replacer: Option[String] = Some("random"), 
nMissEntries: Int = 1, nProbeEntries: Int = 1, nReleaseEntries: Int = 1, @@ -30,8 +31,7 @@ case class DCacheParameters def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) - - def replacement = new RandomReplacement(nWays) + def replacement = ReplacementPolicy.fromString(replacer, nWays, nSets) } trait HasDCacheParameters extends HasL1CacheParameters { @@ -60,7 +60,7 @@ trait HasDCacheParameters extends HasL1CacheParameters { require(full_divide(beatBits, rowBits), s"beatBits($beatBits) must be multiple of rowBits($rowBits)") // this is a VIPT L1 cache require(pgIdxBits >= untagBits, s"page aliasing problem: pgIdxBits($pgIdxBits) < untagBits($untagBits)") - require(rowWords == 1, "Our DCache Implementation assumes rowWords == 1") + // require(rowWords == 1, "Our DCache Implementation assumes rowWords == 1") } abstract class DCacheModule extends L1CacheModule @@ -103,15 +103,20 @@ class L1DataReadReq extends DCacheBundle { // Now, we can write a cache-block in a single cycle class L1DataWriteReq extends L1DataReadReq { - val wmask = Vec(blockRows, Bits(rowWords.W)) - val data = Vec(blockRows, Bits(encRowBits.W)) + val wmask = Bits(blockRows.W) + val data = Vec(blockRows, Bits(rowBits.W)) +} + +class ReplacementAccessBundle extends DCacheBundle { + val set = UInt(log2Up(nSets).W) + val way = UInt(log2Up(nWays).W) } -abstract class AbstractDataArray extends DCacheModule { +abstract class TransposeAbstractDataArray extends DCacheModule { val io = IO(new DCacheBundle { val read = Vec(LoadPipelineWidth, Flipped(DecoupledIO(new L1DataReadReq))) val write = Flipped(DecoupledIO(new L1DataWriteReq)) - val resp = Output(Vec(LoadPipelineWidth, Vec(nWays, Vec(blockRows, Bits(encRowBits.W))))) + val resp = Output(Vec(LoadPipelineWidth, Vec(blockRows, Bits(encRowBits.W)))) val nacks = Output(Vec(LoadPipelineWidth, Bool())) }) @@ -141,10 +146,8 @@ abstract class AbstractDataArray extends DCacheModule { def dumpResp() = { (0 until 
LoadPipelineWidth) map { w => XSDebug(s"DataArray ReadResp channel: $w\n") - (0 until nWays) map { i => - (0 until blockRows) map { r => - XSDebug(s"way: $i cycle: $r data: %x\n", io.resp(w)(i)(r)) - } + (0 until blockRows) map { r => + XSDebug(s"cycle: $r data: %x\n", io.resp(w)(r)) } } } @@ -165,61 +168,131 @@ abstract class AbstractDataArray extends DCacheModule { } } -class DuplicatedDataArray extends AbstractDataArray -{ +class TransposeDuplicatedDataArray extends TransposeAbstractDataArray { val singlePort = true - // write is always ready - io.write.ready := true.B + val readHighPriority = true + def eccBits = encWordBits - wordBits + + def getECCFromEncWord(encWord: UInt) = { + require(encWord.getWidth == encWordBits) + encWord(encWordBits - 1, wordBits) + } + + def getECCFromRow(row: UInt) = { + require(row.getWidth == rowBits) + VecInit((0 until rowWords).map { w => + val word = row(wordBits * (w + 1) - 1, wordBits * w) + getECCFromEncWord(cacheParams.dataCode.encode(word)) + }) + } + val waddr = (io.write.bits.addr >> blockOffBits).asUInt() + val raddrs = io.read.map(r => (r.bits.addr >> blockOffBits).asUInt) + io.write.ready := (if (readHighPriority) { + if (singlePort) { + !VecInit(io.read.map(_.valid)).asUInt.orR + } else { + !(Cat(io.read.zipWithIndex.map { case (r, i) => r.valid && raddrs(i) === waddr }).orR) + } + } else { + true.B + }) + for (j <- 0 until LoadPipelineWidth) { - val raddr = (io.read(j).bits.addr >> blockOffBits).asUInt() + val raddr = raddrs(j) + val rmask = io.read(j).bits.rmask // for single port SRAM, do not allow read and write in the same cycle // for dual port SRAM, raddr === waddr is undefined behavior val rwhazard = if(singlePort) io.write.valid else io.write.valid && waddr === raddr - io.read(j).ready := !rwhazard - - for (w <- 0 until nWays) { - for (r <- 0 until blockRows) { - val resp = Seq.fill(rowWords)(Wire(Bits(encWordBits.W))) - io.resp(j)(w)(r) := Cat((0 until rowWords).reverse map (k => resp(k))) - - for (k <- 0 
until rowWords) { - val array = Module(new SRAMTemplate( - Bits(encWordBits.W), - set=nSets, - way=1, - shouldReset=false, - holdRead=false, - singlePort=singlePort - )) - // data write - val wen = io.write.valid && io.write.bits.way_en(w) && io.write.bits.wmask(r)(k) - array.io.w.req.valid := wen - array.io.w.req.bits.apply( - setIdx=waddr, - data=io.write.bits.data(r)(encWordBits*(k+1)-1,encWordBits*k), - waymask=1.U - ) - - // data read - val ren = io.read(j).valid && io.read(j).bits.way_en(w) && io.read(j).bits.rmask(r) - array.io.r.req.valid := ren - array.io.r.req.bits.apply(setIdx=raddr) - resp(k) := array.io.r.resp.data(0) + io.read(j).ready := (if (readHighPriority) true.B else !rwhazard) + + // use way_en to select a way after data read out + assert(!(RegNext(io.read(j).fire() && PopCount(io.read(j).bits.way_en) > 1.U))) + val way_en = RegNext(io.read(j).bits.way_en) + + for (r <- 0 until blockRows) { + val resp = Wire(Vec(rowWords, Vec(nWays, Bits(wordBits.W)))) + val resp_chosen = Wire(Vec(rowWords, Bits(wordBits.W))) + val ecc_resp = Wire(Vec(rowWords, Vec(nWays, Bits(eccBits.W)))) + val ecc_resp_chosen = Wire(Vec(rowWords, Bits(eccBits.W))) + + val ecc_array = Module(new SRAMTemplate( + Vec(rowWords, Bits(eccBits.W)), + set = nSets, + way = nWays, + shouldReset = false, + holdRead = false, + singlePort = singlePort + )) + + ecc_array.io.w.req.valid := io.write.valid && io.write.bits.wmask(r) + ecc_array.io.w.req.bits.apply( + setIdx = waddr, + data = getECCFromRow(io.write.bits.data(r)), + waymask = io.write.bits.way_en + ) + when (ecc_array.io.w.req.valid) { + XSDebug(p"write in ecc sram ${j.U} row ${r.U}: setIdx=${Hexadecimal(ecc_array.io.w.req.bits.setIdx)} ecc(0)=${Hexadecimal(getECCFromRow(io.write.bits.data(r))(0))} ecc(1)=${Hexadecimal(getECCFromRow(io.write.bits.data(r))(1))} waymask=${Hexadecimal(io.write.bits.way_en)}\n") + } + + ecc_array.io.r.req.valid := io.read(j).valid && rmask(r) + ecc_array.io.r.req.bits.apply(setIdx = raddr) + + for 
(w <- 0 until nWays) { + val data_array = Module(new SRAMTemplate( + Bits(rowBits.W), + set = nSets, + way = 1, + shouldReset = false, + holdRead = false, + singlePort = singlePort + )) + + // data write + val wen = io.write.valid && io.write.bits.way_en(w) && io.write.bits.wmask(r) + data_array.io.w.req.valid := wen + data_array.io.w.req.bits.apply( + setIdx = waddr, + data = io.write.bits.data(r), + waymask = 1.U + ) + when (wen) { + XSDebug(p"write in data sram ${j.U} row ${r.U} way ${w.U}: setIdx=${Hexadecimal(data_array.io.w.req.bits.setIdx)} data=${Hexadecimal(io.write.bits.data(r))}\n") } + + // data read + // read all ways and choose one after resp + val ren = io.read(j).valid && rmask(r) + data_array.io.r.req.valid := ren + data_array.io.r.req.bits.apply(setIdx = raddr) + (0 until rowWords).foreach(k => resp(k)(w) := data_array.io.r.resp.data(0)(wordBits * (k + 1) - 1, wordBits * k)) + (0 until rowWords).foreach(k => ecc_resp(k)(w) := ecc_array.io.r.resp.data(w)(k)) } + for (k <- 0 until rowWords) { + resp_chosen(k) := Mux1H(way_en, resp(k)) + ecc_resp_chosen(k) := Mux1H(way_en, ecc_resp(k)) + // assert(!RegNext(cacheParams.dataCode.decode(Cat(ecc_resp_chosen(k), resp_chosen(k))).uncorrectable && + // way_en.orR && + // RegNext(io.read(j).fire() && rmask(r)))) + } + io.resp(j)(r) := Cat((0 until rowWords).reverse map {k => Cat(ecc_resp_chosen(k), resp_chosen(k))})// resp_chosen.asUInt + } + io.nacks(j) := false.B } } class L1MetadataArray(onReset: () => L1Metadata) extends DCacheModule { val rstVal = onReset() + val metaBits = rstVal.getWidth + val encMetaBits = cacheParams.tagCode.width(metaBits) + val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) - val resp = Output(Vec(nWays, new L1Metadata)) + val resp = Output(Vec(nWays, UInt(encMetaBits.W))) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U @@ -229,14 +302,11 @@ class L1MetadataArray(onReset: () => 
L1Metadata) extends DCacheModule { val rmask = Mux(rst || (nWays == 1).B, (-1).asSInt, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt + 1.U } - val metaBits = rstVal.getWidth - val encMetaBits = cacheParams.tagCode.width(metaBits) - val tag_array = Module(new SRAMTemplate(UInt(encMetaBits.W), set=nSets, way=nWays, shouldReset=false, holdRead=false, singlePort=true)) // tag write - val wen = rst || io.write.valid + val wen = rst || io.write.fire() tag_array.io.w.req.valid := wen tag_array.io.w.req.bits.apply( setIdx=waddr, @@ -244,13 +314,13 @@ class L1MetadataArray(onReset: () => L1Metadata) extends DCacheModule { waymask=VecInit(wmask).asUInt) // tag read - tag_array.io.r.req.valid := io.read.fire() + val ren = io.read.fire() + tag_array.io.r.req.valid := ren tag_array.io.r.req.bits.apply(setIdx=io.read.bits.idx) - io.resp := tag_array.io.r.resp.data.map(rdata => - cacheParams.tagCode.decode(rdata).corrected.asTypeOf(rstVal)) + io.resp := tag_array.io.r.resp.data - io.read.ready := !wen - io.write.ready := !rst + io.write.ready := !ren + io.read.ready := !rst def dumpRead() = { when (io.read.fire()) { @@ -266,35 +336,40 @@ class L1MetadataArray(onReset: () => L1Metadata) extends DCacheModule { } } - def dumpResp() = { - (0 until nWays) map { i => - XSDebug(s"MetaArray Resp: way: $i tag: %x coh: %x\n", - io.resp(i).tag, io.resp(i).coh.state) - } - } + // def dumpResp() = { + // (0 until nWays) map { i => + // XSDebug(s"MetaArray Resp: way: $i tag: %x coh: %x\n", + // io.resp(i).tag, io.resp(i).coh.state) + // } + // } def dump() = { dumpRead dumpWrite - dumpResp + // dumpResp } } class DuplicatedMetaArray extends DCacheModule { + def onReset = L1Metadata(0.U, ClientMetadata.onReset) + val metaBits = onReset.getWidth + val encMetaBits = cacheParams.tagCode.width(metaBits) + val io = IO(new DCacheBundle { val read = Vec(LoadPipelineWidth, Flipped(DecoupledIO(new L1MetaReadReq))) val write = Flipped(DecoupledIO(new L1MetaWriteReq)) - val resp = 
Output(Vec(LoadPipelineWidth, Vec(nWays, new L1Metadata))) + val resp = Output(Vec(LoadPipelineWidth, Vec(nWays, UInt(encMetaBits.W)))) }) - - def onReset = L1Metadata(0.U, ClientMetadata.onReset) val meta = Seq.fill(LoadPipelineWidth) { Module(new L1MetadataArray(onReset _)) } for (w <- 0 until LoadPipelineWidth) { - meta(w).io.write <> io.write + // meta(w).io.write <> io.write + meta(w).io.write.valid := io.write.valid + meta(w).io.write.bits := io.write.bits meta(w).io.read <> io.read(w) io.resp(w) <> meta(w).io.resp } + io.write.ready := VecInit(meta.map(_.io.write.ready)).asUInt.andR def dumpRead() = { (0 until LoadPipelineWidth) map { w => @@ -312,18 +387,18 @@ class DuplicatedMetaArray extends DCacheModule { } } - def dumpResp() = { - (0 until LoadPipelineWidth) map { w => - (0 until nWays) map { i => - XSDebug(s"MetaArray Resp: channel: $w way: $i tag: %x coh: %x\n", - io.resp(w)(i).tag, io.resp(w)(i).coh.state) - } - } - } + // def dumpResp() = { + // (0 until LoadPipelineWidth) map { w => + // (0 until nWays) map { i => + // XSDebug(s"MetaArray Resp: channel: $w way: $i tag: %x coh: %x\n", + // io.resp(w)(i).tag, io.resp(w)(i).coh.state) + // } + // } + // } def dump() = { dumpRead dumpWrite - dumpResp + // dumpResp } } diff --git a/src/main/scala/xiangshan/cache/DCacheWrapper.scala b/src/main/scala/xiangshan/cache/DCacheWrapper.scala index 3c9d9188422..44a2d9bce14 100644 --- a/src/main/scala/xiangshan/cache/DCacheWrapper.scala +++ b/src/main/scala/xiangshan/cache/DCacheWrapper.scala @@ -88,7 +88,6 @@ class DCacheLoadIO extends DCacheWordIO // cycle 0: virtual address: req.addr // cycle 1: physical address: s1_paddr val s1_paddr = Output(UInt(PAddrBits.W)) - val s1_data = Input(Vec(nWays, UInt(DataBits.W))) val s2_hit_way = Input(UInt(nWays.W)) } @@ -135,7 +134,7 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame //---------------------------------------- // core data structures - val dataArray = Module(new 
DuplicatedDataArray) + val dataArray = Module(new TransposeDuplicatedDataArray) val metaArray = Module(new DuplicatedMetaArray) /* dataArray.dump() @@ -164,8 +163,8 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame // MainPipe contend MetaRead with Load 0 // give priority to MainPipe val MetaReadPortCount = 2 - val MainPipeMetaReadPort = 0 - val LoadPipeMetaReadPort = 1 + val MainPipeMetaReadPort = 1 + val LoadPipeMetaReadPort = 0 val metaReadArb = Module(new Arbiter(new L1MetaReadReq, MetaReadPortCount)) @@ -191,8 +190,8 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame // give priority to MainPipe val DataReadPortCount = 2 - val MainPipeDataReadPort = 0 - val LoadPipeDataReadPort = 1 + val MainPipeDataReadPort = 1 + val LoadPipeDataReadPort = 0 val dataReadArb = Module(new Arbiter(new L1DataReadReq, DataReadPortCount)) @@ -278,7 +277,7 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame val mainPipeReq_fire = mainPipeReq_valid && mainPipe.io.req.ready val mainPipeReq_req = RegEnable(mainPipeReqArb.io.out.bits, mainPipeReqArb.io.out.fire()) - mainPipeReqArb.io.out.ready := mainPipe.io.req.ready + mainPipeReqArb.io.out.ready := mainPipeReq_fire || !mainPipeReq_valid mainPipe.io.req.valid := mainPipeReq_valid mainPipe.io.req.bits := mainPipeReq_req @@ -314,6 +313,18 @@ class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParame assert (!bus.d.fire()) } + //---------------------------------------- + // update replacement policy + val replacer = cacheParams.replacement + val access_bundles = ldu.map(_.io.replace_access) ++ Seq(mainPipe.io.replace_access) + val sets = access_bundles.map(_.bits.set) + val touch_ways = Seq.fill(LoadPipelineWidth + 1)(Wire(ValidIO(UInt(log2Up(nWays).W)))) + (touch_ways zip access_bundles).map{ case (w, access) => + w.valid := access.valid + w.bits := access.bits.way + } + replacer.access(sets, touch_ways) + // dcache 
should only deal with DRAM addresses when (bus.a.fire()) { diff --git a/src/main/scala/xiangshan/cache/LoadPipe.scala b/src/main/scala/xiangshan/cache/LoadPipe.scala index 78e2f6eb247..c14a1767ebd 100644 --- a/src/main/scala/xiangshan/cache/LoadPipe.scala +++ b/src/main/scala/xiangshan/cache/LoadPipe.scala @@ -6,74 +6,84 @@ import freechips.rocketchip.tilelink.ClientMetadata import utils.XSDebug -class LoadPipe extends DCacheModule -{ - val io = IO(new DCacheBundle{ +class LoadPipe extends DCacheModule { + def metaBits = (new L1Metadata).getWidth + def encMetaBits = cacheParams.tagCode.width(metaBits) + def getMeta(encMeta: UInt): UInt = { + require(encMeta.getWidth == encMetaBits) + encMeta(metaBits - 1, 0) + } + + val io = IO(new DCacheBundle { // incoming requests - val lsu = Flipped(new DCacheLoadIO) + val lsu = Flipped(new DCacheLoadIO) // req got nacked in stage 0? val nack = Input(Bool()) // meta and data array read port val data_read = DecoupledIO(new L1DataReadReq) - val data_resp = Input(Vec(nWays, Vec(blockRows, Bits(encRowBits.W)))) + val data_resp = Input(Vec(blockRows, Bits(encRowBits.W))) val meta_read = DecoupledIO(new L1MetaReadReq) - val meta_resp = Input(Vec(nWays, new L1Metadata)) + val meta_resp = Input(Vec(nWays, UInt(encMetaBits.W))) // send miss request to miss queue val miss_req = DecoupledIO(new MissReq) + + // update state vec in replacement algo + val replace_access = ValidIO(new ReplacementAccessBundle) }) + val s1_ready = Wire(Bool()) + val s2_ready = Wire(Bool()) // LSU requests // it you got nacked, you can directly passdown - val not_nacked_ready = io.meta_read.ready && io.data_read.ready + val not_nacked_ready = io.meta_read.ready && s1_ready val nacked_ready = true.B // ready can wait for valid - io.lsu.req.ready := io.lsu.req.valid && ((!io.nack && not_nacked_ready) || (io.nack && nacked_ready)) - io.meta_read.valid := io.lsu.req.valid && !io.nack - io.data_read.valid := io.lsu.req.valid && !io.nack + io.lsu.req.ready := 
(!io.nack && not_nacked_ready) || (io.nack && nacked_ready) + io.meta_read.valid := io.lsu.req.fire() && !io.nack val meta_read = io.meta_read.bits - val data_read = io.data_read.bits // Tag read for new requests - meta_read.idx := get_idx(io.lsu.req.bits.addr) + meta_read.idx := get_idx(io.lsu.req.bits.addr) meta_read.way_en := ~0.U(nWays.W) - meta_read.tag := DontCare - // Data read for new requests - data_read.addr := io.lsu.req.bits.addr - data_read.way_en := ~0.U(nWays.W) - // only needs to read the specific row - data_read.rmask := UIntToOH(get_row(io.lsu.req.bits.addr)) + meta_read.tag := DontCare // Pipeline // -------------------------------------------------------------------------------- // stage 0 val s0_valid = io.lsu.req.fire() val s0_req = io.lsu.req.bits + val s0_fire = s0_valid && s1_ready - assert(!(s0_valid && s0_req.cmd =/= MemoryOpConstants.M_XRD), "LoadPipe only accepts load req") + assert(RegNext(!(s0_valid && s0_req.cmd =/= MemoryOpConstants.M_XRD)), "LoadPipe only accepts load req") dump_pipeline_reqs("LoadPipe s0", s0_valid, s0_req) - // -------------------------------------------------------------------------------- // stage 1 - val s1_req = RegNext(s0_req) - val s1_valid = RegNext(s0_valid, init = false.B) + val s1_valid = RegInit(false.B) + val s1_req = RegEnable(s0_req, s0_fire) // in stage 1, load unit gets the physical address val s1_addr = io.lsu.s1_paddr val s1_nack = RegNext(io.nack) + val s1_fire = s1_valid && s2_ready + s1_ready := !s1_valid || s1_fire + + when (s0_fire) { s1_valid := true.B } + .elsewhen (s1_fire) { s1_valid := false.B } dump_pipeline_reqs("LoadPipe s1", s1_valid, s1_req) // tag check - val meta_resp = io.meta_resp + val meta_resp = VecInit(io.meta_resp.map(r => getMeta(r).asTypeOf(new L1Metadata))) def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f)) val s1_tag_eq_way = wayMap((w: Int) => meta_resp(w).tag === (get_tag(s1_addr))).asUInt val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) 
&& meta_resp(w).coh.isValid()).asUInt val s1_tag_match = s1_tag_match_way.orR + assert(RegNext(PopCount(s1_tag_match_way) <= 1.U), "tag should not match with more than 1 way") val s1_fake_meta = Wire(new L1Metadata) s1_fake_meta.tag := get_tag(s1_addr) @@ -81,70 +91,82 @@ class LoadPipe extends DCacheModule // when there are no tag match, we give it a Fake Meta // this simplifies our logic in s2 stage - val s1_hit_meta = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap((w: Int) => meta_resp(w))), s1_fake_meta) + val s1_hit_meta = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap((w: Int) => meta_resp(w))), s1_fake_meta) val s1_hit_coh = s1_hit_meta.coh + // data read + val data_read = io.data_read.bits + data_read.addr := s1_addr + data_read.way_en := s1_tag_match_way + // only needs to read the specific row + data_read.rmask := UIntToOH(get_row(s1_addr)) + io.data_read.valid := s1_fire && !s1_nack + + io.replace_access.valid := RegNext(io.meta_read.fire()) && s1_tag_match && s1_valid + io.replace_access.bits.set := get_idx(s1_req.addr) + io.replace_access.bits.way := OHToUInt(s1_tag_match_way) - // select the row we are interested in - val s1_data = Wire(Vec(nWays, UInt(encRowBits.W))) - val data_resp = io.data_resp - for (w <- 0 until nWays) { s1_data(w) := data_resp(w)(get_row(s1_addr)) } - - // select the word - // the index of word in a row, in case rowBits != wordBits - val s1_word_idx = if (rowWords == 1) 0.U else s1_addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)) - - // load data gen - val s1_data_words = Wire(Vec(nWays, Vec(rowWords, UInt(encWordBits.W)))) - for (w <- 0 until nWays) { - for (r <- 0 until rowWords) { - s1_data_words(w)(r) := s1_data(w)(encWordBits * (r + 1) - 1, encWordBits * r) - } - } - - val s1_words = (0 until nWays) map (i => s1_data_words(i)(s1_word_idx)) - - val s1_decoded = (0 until nWays) map (i => cacheParams.dataCode.decode(s1_words(i))) - val s1_word_decoded = VecInit((0 until nWays) map (i => s1_decoded(i).corrected)) - (0 
until nWays) map (i => assert (!(s1_valid && s1_tag_match && (i.U === OHToUInt(s1_tag_match_way)) && s1_decoded(i).uncorrectable))) + // tag ecc check + (0 until nWays).foreach(w => assert(!RegNext(s1_valid && s1_tag_match_way(w) && cacheParams.tagCode.decode(io.meta_resp(w)).uncorrectable))) - io.lsu.s1_data := s1_word_decoded + io.replace_access.valid := RegNext(io.meta_read.fire()) && s1_tag_match && s1_valid + io.replace_access.bits.set := get_idx(s1_req.addr) + io.replace_access.bits.way := OHToUInt(s1_tag_match_way) // -------------------------------------------------------------------------------- // stage 2 - val s2_req = RegNext(s1_req) - val s2_valid = RegNext(s1_valid && !io.lsu.s1_kill, init = false.B) - val s2_addr = RegNext(s1_addr) + // val s2_valid = RegEnable(next = s1_valid && !io.lsu.s1_kill, init = false.B, enable = s1_fire) + val s2_valid = RegInit(false.B) + val s2_req = RegEnable(s1_req, s1_fire) + val s2_addr = RegEnable(s1_addr, s1_fire) + s2_ready := true.B - dump_pipeline_reqs("LoadPipe s2", s2_valid, s2_req) + when (s1_fire) { s2_valid := !io.lsu.s1_kill } + .elsewhen(io.lsu.resp.fire()) { s2_valid := false.B } + dump_pipeline_reqs("LoadPipe s2", s2_valid, s2_req) + // hit, miss, nack, permission checking - val s2_tag_match_way = RegNext(s1_tag_match_way) - val s2_tag_match = RegNext(s1_tag_match) + val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_fire) + val s2_tag_match = RegEnable(s1_tag_match, s1_fire) - val s2_hit_meta = RegNext(s1_hit_meta) - val s2_hit_coh = RegNext(s1_hit_coh) + val s2_hit_meta = RegEnable(s1_hit_meta, s1_fire) + val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire) val s2_has_permission = s2_hit_coh.onAccess(s2_req.cmd)._1 - val s2_new_hit_coh = s2_hit_coh.onAccess(s2_req.cmd)._3 + val s2_new_hit_coh = s2_hit_coh.onAccess(s2_req.cmd)._3 val s2_hit = s2_tag_match && s2_has_permission && s2_hit_coh === s2_new_hit_coh - // generate data - val s2_data = RegNext(s1_word_decoded) - // select the way out - val 
s2_data_muxed = Mux1H(s2_tag_match_way, s2_data) - // when req got nacked, upper levels should replay this request // nacked or not - val s2_nack_hit = RegNext(s1_nack) + val s2_nack_hit = RegEnable(s1_nack, s1_fire) // can no allocate mshr for load miss val s2_nack_no_mshr = io.miss_req.valid && !io.miss_req.ready // Bank conflict on data arrays // For now, we use DuplicatedDataArray, so no bank conflicts - val s2_nack_data = false.B - + val s2_nack_data = false.B val s2_nack = s2_nack_hit || s2_nack_no_mshr || s2_nack_data + // select the row we are interested in + val data_resp = io.data_resp + val s2_data = data_resp(get_row(s2_addr)) + + // select the word + // the index of word in a row, in case rowBits != wordBits + val s2_word_idx = if (rowWords == 1) 0.U else s2_addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes)) + + // load data gen + val s2_data_words = Wire(Vec(rowWords, UInt(encWordBits.W))) + for (w <- 0 until rowWords) { + s2_data_words(w) := s2_data(encWordBits * (w + 1) - 1, encWordBits * w) + } + val s2_word = s2_data_words(s2_word_idx) + // val s2_decoded = cacheParams.dataCode.decode(s2_word) + // val s2_word_decoded = s2_decoded.corrected + val s2_word_decoded = s2_word(wordBits - 1, 0) + assert(RegNext(!(s2_valid && s2_hit && !s2_nack && cacheParams.dataCode.decode(s2_word).uncorrectable))) + + // only dump these signals when they are actually valid dump_pipeline_valids("LoadPipe s2", "s2_hit", s2_valid && s2_hit) dump_pipeline_valids("LoadPipe s2", "s2_nack", s2_valid && s2_nack) @@ -152,18 +174,18 @@ class LoadPipe extends DCacheModule dump_pipeline_valids("LoadPipe s2", "s2_nack_no_mshr", s2_valid && s2_nack_no_mshr) // send load miss to miss queue - io.miss_req.valid := s2_valid && !s2_nack_hit && !s2_nack_data && !s2_hit - io.miss_req.bits := DontCare + io.miss_req.valid := s2_valid && !s2_nack_hit && !s2_nack_data && !s2_hit + io.miss_req.bits := DontCare io.miss_req.bits.source := LOAD_SOURCE.U - io.miss_req.bits.cmd := s2_req.cmd - 
io.miss_req.bits.addr := get_block_addr(s2_addr) - io.miss_req.bits.coh := s2_hit_coh + io.miss_req.bits.cmd := s2_req.cmd + io.miss_req.bits.addr := get_block_addr(s2_addr) + io.miss_req.bits.coh := s2_hit_coh // send back response val resp = Wire(ValidIO(new DCacheWordResp)) - resp.valid := s2_valid - resp.bits := DontCare - resp.bits.data := s2_data_muxed + resp.valid := s2_valid + resp.bits := DontCare + resp.bits.data := s2_word_decoded // on miss or nack, upper level should replay request // but if we successfully sent the request to miss queue // upper level does not need to replay request @@ -173,13 +195,14 @@ class LoadPipe extends DCacheModule io.lsu.resp.valid := resp.valid io.lsu.resp.bits := resp.bits - assert(!(resp.valid && !io.lsu.resp.ready)) + io.lsu.s2_hit_way := s2_tag_match_way + assert(RegNext(!(resp.valid && !io.lsu.resp.ready)), "lsu should be ready in s2") when (resp.valid) { resp.bits.dump() } - io.lsu.s2_hit_way := s2_tag_match_way + assert(RegNext(s1_ready && s2_ready), "load pipeline should never be blocked") // ------- // Debug logging functions diff --git a/src/main/scala/xiangshan/cache/MainPipe.scala b/src/main/scala/xiangshan/cache/MainPipe.scala index ff03fb32f7e..718f2f1a4eb 100644 --- a/src/main/scala/xiangshan/cache/MainPipe.scala +++ b/src/main/scala/xiangshan/cache/MainPipe.scala @@ -60,8 +60,10 @@ class MainPipeResp extends DCacheBundle } } -class MainPipe extends DCacheModule -{ +class MainPipe extends DCacheModule { + def metaBits = (new L1Metadata).getWidth + def encMetaBits = cacheParams.tagCode.width(metaBits) + val io = IO(new DCacheBundle { // req and resp val req = Flipped(DecoupledIO(new MainPipeReq)) @@ -72,11 +74,11 @@ class MainPipe extends DCacheModule // meta/data read/write val data_read = DecoupledIO(new L1DataReadReq) - val data_resp = Input(Vec(nWays, Vec(blockRows, Bits(encRowBits.W)))) + val data_resp = Input(Vec(blockRows, Bits(encRowBits.W))) val data_write = DecoupledIO(new L1DataWriteReq) val 
meta_read = DecoupledIO(new L1MetaReadReq) - val meta_resp = Input(Vec(nWays, new L1Metadata)) + val meta_resp = Input(Vec(nWays, UInt(encMetaBits.W))) val meta_write = DecoupledIO(new L1MetaWriteReq) // write back @@ -84,37 +86,47 @@ class MainPipe extends DCacheModule // lrsc locked block should block probe val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W))) + + // update state vec in replacement algo + val replace_access = ValidIO(new ReplacementAccessBundle) }) + def getMeta(encMeta: UInt): UInt = { + require(encMeta.getWidth == encMetaBits) + encMeta(metaBits - 1, 0) + } + // assign default value to output signals - io.req.ready := false.B - io.miss_resp.valid := false.B + io.req.ready := false.B + io.miss_req.valid := false.B + io.miss_req.bits := DontCare + io.miss_resp.valid := false.B io.store_resp.valid := false.B - io.amo_resp.valid := false.B + io.amo_resp.valid := false.B - io.data_read.valid := false.B + io.data_read.valid := false.B io.data_write.valid := false.B - io.data_write.bits := DontCare - io.meta_read.valid := false.B + io.data_write.bits := DontCare + io.meta_read.valid := false.B io.meta_write.valid := false.B - io.meta_write.bits := DontCare + io.meta_write.bits := DontCare - io.wb_req.valid := false.B - io.wb_req.bits := DontCare + io.wb_req.valid := false.B + io.wb_req.bits := DontCare io.lrsc_locked_block.valid := false.B - io.lrsc_locked_block.bits := DontCare + io.lrsc_locked_block.bits := DontCare // Pipeline - // TODO: add full bypass for meta and data, bypass should be based on block address match - val stall = Wire(Bool()) + val s1_s0_set_conflict, s2_s0_set_conflict, s3_s0_set_conflict = Wire(Bool()) + val set_conflict = s1_s0_set_conflict || s2_s0_set_conflict || s3_s0_set_conflict + val s1_ready, s2_ready, s3_ready = Wire(Bool()) + val s3_valid = RegInit(false.B) + val update_meta, need_write_data = Wire(Bool()) // -------------------------------------------------------------------------------- // stage 0 - // read meta 
and data - - // valid: this pipeline has valid req - // fire: req fired and will appear in next pipeline stage + // read meta val s0_valid = io.req.valid val s0_fire = io.req.fire() val s0_req = io.req.bits @@ -129,13 +141,30 @@ class MainPipe extends DCacheModule val word_full_overwrite = Wire(Vec(blockRows, Bits(rowWords.W))) val word_write = Wire(Vec(blockRows, Bits(rowWords.W))) for (i <- 0 until blockRows) { - word_full_overwrite(i) := VecInit((0 until rowWords) map { w => word_mask(i)(w).andR }).asUInt - word_write(i) := VecInit((0 until rowWords) map { w => word_mask(i)(w).orR }).asUInt + word_full_overwrite(i) := VecInit((0 until rowWords).map { w => word_mask(i)(w).andR }).asUInt + word_write(i) := VecInit((0 until rowWords).map { w => word_mask(i)(w).orR }).asUInt } - val row_full_overwrite = VecInit(word_full_overwrite.map(w => w.andR)).asUInt - val row_write = VecInit(word_write.map(w => w.orR)).asUInt + val row_full_overwrite = VecInit(word_full_overwrite.map(_.andR)).asUInt + val row_write = VecInit(word_write.map(_.orR)).asUInt val full_overwrite = row_full_overwrite.andR + // sanity check + when (s0_fire) { + OneHot.checkOneHot(Seq(s0_req.miss, s0_req.probe)) + } + assert(!RegNext(s0_fire && s0_req.miss && !full_overwrite), "miss req should full overwrite") + + val meta_ready = io.meta_read.ready + val data_ready = io.data_read.ready + io.req.ready := meta_ready && !set_conflict && s1_ready && !(s3_valid && update_meta) + + io.meta_read.valid := io.req.valid && !set_conflict/* && s1_ready*/ && !(s3_valid && update_meta) + val meta_read = io.meta_read.bits + meta_read.idx := get_idx(s0_req.addr) + meta_read.way_en := ~0.U(nWays.W) + meta_read.tag := DontCare + + // generate rmask here and use it in stage 1 // If req comes form MissQueue, it must be a full overwrite, // but we still need to read data array // since we may do replacement
words. // If it's a AMO(not from MissQueue), only need to read the specific word. // If it's probe, read it all. - - // do not left out !s0_req.probe, - // if it's a probe, all data mask fields are useless - // don't worry about duplicate conditions - // backend tools will remove them - val miss_need_data = s0_req.miss + val miss_need_data = s0_req.miss val store_need_data = !s0_req.miss && !s0_req.probe && s0_req.source === STORE_SOURCE.U && !full_overwrite val amo_need_data = !s0_req.miss && !s0_req.probe && s0_req.source === AMO_SOURCE.U val probe_need_data = s0_req.probe - + val need_data = miss_need_data || store_need_data || amo_need_data || probe_need_data - val meta_read = io.meta_read.bits - val data_read = io.data_read.bits - - val s1_s0_set_conflict = Wire(Bool()) - val s2_s0_set_conflict = Wire(Bool()) - val s3_s0_set_conflict = Wire(Bool()) - val set_conflict = s1_s0_set_conflict || s2_s0_set_conflict || s3_s0_set_conflict - - // sanity check - when (s0_fire) { - when (s0_req.miss) { - assert (full_overwrite) - } - OneHot.checkOneHot(Seq(s0_req.miss, s0_req.probe)) - } - - val meta_ready = io.meta_read.ready - val data_ready = !need_data || io.data_read.ready - io.req.ready := meta_ready && data_ready && !set_conflict && !stall - - io.meta_read.valid := io.req.valid && !set_conflict && !stall - io.data_read.valid := io.req.valid && need_data && !set_conflict && !stall - - // Tag read for new requests - meta_read.idx := get_idx(s0_req.addr) - meta_read.way_en := ~0.U(nWays.W) - meta_read.tag := DontCare - - // Data read for new requests - val rowWordBits = log2Floor(rowWords) - val amo_row = s0_req.word_idx >> rowWordBits + def rowWordBits = log2Floor(rowWords) + val amo_row = s0_req.word_idx >> rowWordBits val amo_word = if (rowWordBits == 0) 0.U else s0_req.word_idx(rowWordBits - 1, 0) val amo_word_addr = s0_req.addr + (s0_req.word_idx << wordOffBits) val store_rmask = row_write & ~row_full_overwrite - val amo_rmask = UIntToOH(amo_row) - val 
full_rmask = ~0.U(blockRows.W) - val none_rmask = 0.U(blockRows.W) + val amo_rmask = UIntToOH(amo_row) + val full_rmask = ~0.U(blockRows.W) + val none_rmask = 0.U(blockRows.W) - val rmask = Mux(store_need_data, store_rmask, + val s0_rmask = Mux(store_need_data, store_rmask, Mux(amo_need_data, amo_rmask, - Mux(probe_need_data || miss_need_data, full_rmask, none_rmask))) + Mux(probe_need_data || miss_need_data, full_rmask, none_rmask))) // generate wmask here and use it in stage 2 val store_wmask = word_write - val amo_wmask = WireInit(VecInit((0 until blockRows) map (i => 0.U(rowWords.W)))) - amo_wmask(amo_row) := VecInit((0 until rowWords) map (w => w.U === amo_word)).asUInt - val full_wmask = VecInit((0 until blockRows) map (i => ~0.U(rowWords.W))) - val none_wmask = VecInit((0 until blockRows) map (i => 0.U(rowWords.W))) - - data_read.addr := s0_req.addr - data_read.way_en := ~0.U(nWays.W) - - data_read.rmask := rmask + val amo_wmask = WireInit(VecInit((0 until blockRows).map(i => 0.U(rowWords.W)))) + amo_wmask(amo_row) := VecInit((0 until rowWords).map(w => w.U === amo_word)).asUInt + val full_wmask = VecInit((0 until blockRows).map(i => ~0.U(rowWords.W))) + val none_wmask = VecInit((0 until blockRows).map(i => 0.U(rowWords.W))) dump_pipeline_reqs("MainPipe s0", s0_valid, s0_req) - // -------------------------------------------------------------------------------- // stage 1 - // read out meta, check hit or miss - // TODO: add stalling - + // read data, get meta, check hit or miss val s1_valid = RegInit(false.B) - val s1_fire = s1_valid && !stall + val s1_need_data = RegEnable(need_data, s0_fire) + val s1_fire = s1_valid && s2_ready && (!s1_need_data || io.data_read.fire()) val s1_req = RegEnable(s0_req, s0_fire) + val s1_set = get_idx(s1_req.addr) - val s1_rmask = RegEnable(rmask, s0_fire) + val s1_rmask = RegEnable(s0_rmask, s0_fire) val s1_store_wmask = RegEnable(store_wmask, s0_fire) - val s1_amo_wmask = RegEnable(amo_wmask, s0_fire) - val s1_full_wmask = 
RegEnable(full_wmask, s0_fire) - val s1_none_wmask = RegEnable(none_wmask, s0_fire) + val s1_amo_wmask = RegEnable(amo_wmask, s0_fire) - val s1_amo_row = RegEnable(amo_row, s0_fire) - val s1_amo_word = RegEnable(amo_word, s0_fire) + val s1_amo_row = RegEnable(amo_row, s0_fire) + val s1_amo_word = RegEnable(amo_word, s0_fire) val s1_amo_word_addr = RegEnable(amo_word_addr, s0_fire) s1_s0_set_conflict := s1_valid && get_idx(s1_req.addr) === get_idx(s0_req.addr) - when (s0_fire) { s1_valid := true.B } - when (!s0_fire && s1_fire) { s1_valid := false.B } - - dump_pipeline_reqs("MainPipe s1", s1_valid, s1_req) - - val meta_resp_latched = Reg(Vec(nWays, new L1Metadata)) - val meta_resp = Mux(RegNext(next = stall, init = false.B), meta_resp_latched, io.meta_resp) - when (stall) { - meta_resp_latched := meta_resp + when (s0_fire) { + s1_valid := true.B + }.elsewhen (s1_fire) { + s1_valid := false.B } + s1_ready := !s1_valid || s1_fire + + // tag match + val ecc_meta_resp = WireInit(VecInit(Seq.fill(nWays)(0.U(encMetaBits.W)))) + ecc_meta_resp := Mux(RegNext(s0_fire), io.meta_resp, RegNext(ecc_meta_resp)) + val meta_resp = ecc_meta_resp.map(m => getMeta(m).asTypeOf(new L1Metadata)) + - // tag check def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f)) val s1_tag_eq_way = wayMap((w: Int) => meta_resp(w).tag === (get_tag(s1_req.addr))).asUInt val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta_resp(w).coh.isValid()).asUInt @@ -263,7 +251,8 @@ class MainPipe extends DCacheModule // replacement policy val replacer = cacheParams.replacement - val s1_repl_way_en = UIntToOH(replacer.way) + val s1_repl_way_en = WireInit(0.U(nWays.W)) + s1_repl_way_en := Mux(RegNext(s0_fire), UIntToOH(replacer.way(s1_set)), RegNext(s1_repl_way_en)) val s1_repl_meta = Mux1H(s1_repl_way_en, wayMap((w: Int) => meta_resp(w))) val s1_repl_coh = s1_repl_meta.coh @@ -274,21 +263,17 @@ class MainPipe extends DCacheModule val s1_meta = Mux(s1_need_replacement, s1_repl_meta, 
s1_hit_meta) val s1_coh = Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh) - // for now, since we are using random replacement - // we only need to update replacement states after every valid replacement decision - // we only do replacement when we are true miss(not permission miss) - when (s1_fire) { - when (s1_need_replacement) { - replacer.miss - } - } + // read data + io.data_read.valid := s1_valid/* && s2_ready*/ && s1_need_data && !(s3_valid && need_write_data) + val data_read = io.data_read.bits + data_read.rmask := s1_rmask + data_read.way_en := s1_way_en + data_read.addr := s1_req.addr - // s1 data - val s1_data_resp_latched = Reg(Vec(nWays, Vec(blockRows, Bits(encRowBits.W)))) - val s1_data_resp = Mux(RegNext(next = stall, init = false.B), s1_data_resp_latched, io.data_resp) - when (stall) { - s1_data_resp_latched := s1_data_resp - } + // tag ecc check + (0 until nWays).foreach(w => assert(!(s1_valid && s1_tag_match_way(w) && cacheParams.tagCode.decode(ecc_meta_resp(w)).uncorrectable))) + + dump_pipeline_reqs("MainPipe s1", s1_valid, s1_req) // -------------------------------------------------------------------------------- // stage 2 @@ -297,58 +282,53 @@ class MainPipe extends DCacheModule // all other stuff, permission checking, write/amo stuff stay in s3 // we only change cache internal states(lr/sc counter, tag/data array) in s3 val s2_valid = RegInit(false.B) - val s2_fire = s2_valid && !stall + val s2_fire = s2_valid && s3_ready val s2_req = RegEnable(s1_req, s1_fire) + s2_ready := !s2_valid || s2_fire - val s2_rmask = RegEnable(s1_rmask, s1_fire) + val s2_rmask = RegEnable(s1_rmask, s1_fire) val s2_store_wmask = RegEnable(s1_store_wmask, s1_fire) - val s2_amo_wmask = RegEnable(s1_amo_wmask, s1_fire) - val s2_full_wmask = RegEnable(s1_full_wmask, s1_fire) - val s2_none_wmask = RegEnable(s1_none_wmask, s1_fire) + val s2_amo_wmask = RegEnable(s1_amo_wmask, s1_fire) - val s2_amo_row = RegEnable(s1_amo_row, s1_fire) - val s2_amo_word = 
RegEnable(s1_amo_word, s1_fire) + val s2_amo_row = RegEnable(s1_amo_row, s1_fire) + val s2_amo_word = RegEnable(s1_amo_word, s1_fire) val s2_amo_word_addr = RegEnable(s1_amo_word_addr, s1_fire) - - s2_s0_set_conflict := s2_valid && get_idx(s2_req.addr) === get_idx(s0_req.addr) + s2_s0_set_conflict := s2_valid && get_idx(s2_req.addr) === get_idx(s0_req.addr) when (s1_fire) { s2_valid := true.B } - when (!s1_fire && s2_fire) { s2_valid := false.B } - - dump_pipeline_reqs("MainPipe s2", s2_valid, s2_req) + .elsewhen(s2_fire) { s2_valid := false.B } - val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_fire) - val s2_tag_match = RegEnable(s1_tag_match, s1_fire) - - val s2_hit_meta = RegEnable(s1_hit_meta, s1_fire) - val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire) + val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_fire) + val s2_tag_match = RegEnable(s1_tag_match, s1_fire) + val s2_hit_meta = RegEnable(s1_hit_meta, s1_fire) + val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire) val s2_has_permission = s2_hit_coh.onAccess(s2_req.cmd)._1 - val s2_new_hit_coh = s2_hit_coh.onAccess(s2_req.cmd)._3 + val s2_new_hit_coh = s2_hit_coh.onAccess(s2_req.cmd)._3 - val s2_repl_meta = RegEnable(s1_repl_meta, s1_fire) - val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire) - val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire) + val s2_repl_meta = RegEnable(s1_repl_meta, s1_fire) + val s2_repl_coh = s2_repl_meta.coh + val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire) - // only true miss request(not permission miss) need to do replacement - // we use repl meta when we really need to a replacement val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire) - val s2_way_en = RegEnable(s1_way_en, s1_fire) - val s2_meta = RegEnable(s1_meta, s1_fire) - val s2_coh = RegEnable(s1_coh, s1_fire) - val s2_data_resp = RegEnable(s1_data_resp, s1_fire) + val s2_way_en = RegEnable(s1_way_en, s1_fire) + val s2_meta = RegEnable(s1_meta, s1_fire) + val s2_coh = s2_meta.coh // we will treat 
it as a hit // if we need to update meta from Trunk to Dirty // go update it val s2_hit = s2_tag_match && s2_has_permission - val s2_amo_hit = s2_hit && !s2_req.miss && !s2_req.probe && s2_req.source === AMO_SOURCE.U + val s2_amo_hit = s2_hit && !s2_req.miss && !s2_req.probe && s2_req.source === AMO_SOURCE.U when (s2_valid) { XSDebug("MainPipe: s2 s2_tag_match: %b s2_has_permission: %b s2_hit: %b s2_need_replacement: %b s2_way_en: %x s2_state: %d\n", s2_tag_match, s2_has_permission, s2_hit, s2_need_replacement, s2_way_en, s2_coh.state) } + val data_resp = WireInit(VecInit(Seq.fill(blockRows)(0.U(encRowBits.W)))) + data_resp := Mux(RegNext(s1_fire), io.data_resp, RegNext(data_resp)) + // generate write data val s2_store_data_merged = Wire(Vec(blockRows, UInt(rowBits.W))) @@ -357,15 +337,12 @@ class MainPipe extends DCacheModule ((~full_wmask & old_data) | (full_wmask & new_data)) } - val s2_data = Mux1H(s2_way_en, s2_data_resp) - - // TODO: deal with ECC errors val s2_data_decoded = (0 until blockRows) map { r => (0 until rowWords) map { w => - val data = s2_data(r)(encWordBits * (w + 1) - 1, encWordBits * w) + val data = data_resp(r)(encWordBits * (w + 1) - 1, encWordBits * w) val decoded = cacheParams.dataCode.decode(data) - assert(!(s2_valid && s2_hit && s2_rmask(r) && decoded.uncorrectable)) - decoded.corrected + assert(!RegNext(s2_valid && s2_hit && s2_rmask(r) && decoded.uncorrectable)) + data(wordBits - 1, 0) } } @@ -387,73 +364,53 @@ class MainPipe extends DCacheModule val s2_amo_word_data = VecInit((0 until rowWords) map (w => s2_amo_row_data(wordBits * (w + 1) - 1, wordBits * w))) val s2_data_word = s2_amo_word_data(s2_amo_word) + dump_pipeline_reqs("MainPipe s2", s2_valid, s2_req) // -------------------------------------------------------------------------------- // stage 3 // do permission checking, write/amo stuff in s3 // we only change cache internal states(lr/sc counter, tag/data array) in s3 - val s3_valid = RegInit(false.B) - val s3_fire = s3_valid 
&& !stall + val s3_fire = Wire(Bool()) val s3_req = RegEnable(s2_req, s2_fire) + s3_ready := !s3_valid || s3_fire - val s3_rmask = RegEnable(s2_rmask, s2_fire) + val s3_rmask = RegEnable(s2_rmask, s2_fire) val s3_store_wmask = RegEnable(s2_store_wmask, s2_fire) - val s3_amo_wmask = RegEnable(s2_amo_wmask, s2_fire) - val s3_full_wmask = RegEnable(s2_full_wmask, s2_fire) - val s3_none_wmask = RegEnable(s2_none_wmask, s2_fire) + val s3_amo_wmask = RegEnable(s2_amo_wmask, s2_fire) - val s3_amo_row = RegEnable(s2_amo_row, s2_fire) - val s3_amo_word = RegEnable(s2_amo_word, s2_fire) + val s3_amo_row = RegEnable(s2_amo_row, s2_fire) + val s3_amo_word = RegEnable(s2_amo_word, s2_fire) val s3_amo_word_addr = RegEnable(s2_amo_word_addr, s2_fire) - val s3_data_word = RegEnable(s2_data_word, s2_fire) + val s3_data_word = RegEnable(s2_data_word, s2_fire) val s3_store_data_merged = RegEnable(s2_store_data_merged, s2_fire) - val s3_data_decoded = RegEnable(VecInit(s2_data_decoded.flatten).asUInt, s2_fire) + val s3_data_decoded = RegEnable(VecInit(s2_data_decoded.flatten).asUInt, s2_fire) s3_s0_set_conflict := s3_valid && get_idx(s3_req.addr) === get_idx(s0_req.addr) when (s2_fire) { s3_valid := true.B } - when (!s2_fire && s3_fire) { s3_valid := false.B } - - dump_pipeline_reqs("MainPipe s3", s3_valid, s3_req) + .elsewhen (s3_fire) { s3_valid := false.B } - val s3_tag_match_way = RegEnable(s2_tag_match_way, s2_fire) - val s3_tag_match = RegEnable(s2_tag_match, s2_fire) - - val s3_hit_meta = RegEnable(s2_hit_meta, s2_fire) - val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire) + val s3_tag_match_way = RegEnable(s2_tag_match_way, s2_fire) + val s3_tag_match = RegEnable(s2_tag_match, s2_fire) + val s3_hit_meta = RegEnable(s2_hit_meta, s2_fire) + val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire) val s3_has_permission = s3_hit_coh.onAccess(s3_req.cmd)._1 - val s3_new_hit_coh = s3_hit_coh.onAccess(s3_req.cmd)._3 + val s3_new_hit_coh = s3_hit_coh.onAccess(s3_req.cmd)._3 - val s3_repl_meta = 
RegEnable(s2_repl_meta, s2_fire) - val s3_repl_coh = RegEnable(s2_repl_coh, s2_fire) - val s3_repl_way_en = RegEnable(s2_repl_way_en, s2_fire) + val s3_repl_meta = RegEnable(s2_repl_meta, s2_fire) + val s3_repl_coh = s3_repl_meta.coh + val s3_repl_way_en = RegEnable(s2_repl_way_en, s2_fire) - // only true miss request(not permission miss) need to do replacement - // we use repl meta when we really need to a replacement val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire) - val s3_way_en = RegEnable(s2_way_en, s2_fire) - val s3_meta = RegEnable(s2_meta, s2_fire) - val s3_coh = RegEnable(s2_coh, s2_fire) - val s3_data_resp = RegEnable(s2_data_resp, s2_fire) + val s3_way_en = RegEnable(s2_way_en, s2_fire) + val s3_meta = RegEnable(s2_meta, s2_fire) + val s3_coh = s3_meta.coh // -------------------------------------------------------------------------------- // Permission checking val miss_new_coh = s3_coh.onGrant(s3_req.cmd, s3_req.miss_param) - when (s3_valid) { - // permission checking for miss refill - when (s3_req.miss) { - // if miss refill req hits in dcache - // make sure it has enough permission to complete this cmd - assert (miss_new_coh.isValid()) - - when (s3_tag_match) { - // if miss refill req hits in dcache - // then the old permission should be lower than new permission - // otherwise we would not miss - assert (s3_hit_coh.state < miss_new_coh.state) - } - } - } + assert(!RegNext(s3_valid && s3_req.miss && !miss_new_coh.isValid())) + assert(!RegNext(s3_valid && s3_req.miss && s3_tag_match && !(s3_hit_coh.state < miss_new_coh.state))) // Determine what state to go to based on Probe param val (probe_has_dirty_data, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param) @@ -464,36 +421,34 @@ class MainPipe extends DCacheModule // go update it val s3_hit = s3_tag_match && s3_has_permission val s3_store_hit = s3_hit && !s3_req.miss && !s3_req.probe && s3_req.source === STORE_SOURCE.U - val s3_amo_hit = s3_hit && !s3_req.miss && 
!s3_req.probe && s3_req.source === AMO_SOURCE.U + val s3_amo_hit = s3_hit && !s3_req.miss && !s3_req.probe && s3_req.source === AMO_SOURCE.U when (s3_valid) { XSDebug("MainPipe: s3 s3_tag_match: %b s3_has_permission: %b s3_hit: %b s3_need_replacement: %b s3_way_en: %x s3_state: %d\n", s3_tag_match, s3_has_permission, s3_hit, s3_need_replacement, s3_way_en, s3_coh.state) } + dump_pipeline_reqs("MainPipe s3", s3_valid, s3_req) + // -------------------------------------------------------------------------------- // Write to MetaArray - - // whether we need to update meta - // miss should always update meta + // store only update meta when it hits and needs to update Trunk to Dirty val miss_update_meta = s3_req.miss val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh - // store only update meta when it hits and needs to update Trunk to Dirty val store_update_meta = s3_store_hit && s3_hit_coh =/= s3_new_hit_coh val amo_update_meta = s3_amo_hit && s3_hit_coh =/= s3_new_hit_coh - val update_meta = miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta + update_meta := miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta val new_coh = Mux(miss_update_meta, miss_new_coh, Mux(probe_update_meta, probe_new_coh, - Mux(store_update_meta || amo_update_meta, s3_new_hit_coh, ClientMetadata.onReset))) + Mux(store_update_meta || amo_update_meta, s3_new_hit_coh, ClientMetadata.onReset))) - io.meta_write.valid := s3_fire && update_meta - io.meta_write.bits.idx := get_idx(s3_req.addr) - io.meta_write.bits.data.coh := new_coh + io.meta_write.valid := s3_fire && update_meta + io.meta_write.bits.idx := get_idx(s3_req.addr) + io.meta_write.bits.way_en := s3_way_en io.meta_write.bits.data.tag := get_tag(s3_req.addr) - io.meta_write.bits.way_en := s3_way_en - + io.meta_write.bits.data.coh := new_coh // -------------------------------------------------------------------------------- // LR, SC and AMO @@ -549,7 +504,6 
@@ class MainPipe extends DCacheModule } assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row") - // -------------------------------------------------------------------------------- // Write to DataArray // Miss: @@ -564,11 +518,11 @@ class MainPipe extends DCacheModule // generate write mask // which word do we need to write - val wmask = Mux(s3_req.miss, s3_full_wmask, - Mux(s3_store_hit, s3_store_wmask, - Mux(s3_can_do_amo_write, s3_amo_wmask, - s3_none_wmask))) - val need_write_data = VecInit(wmask.map(w => w.orR)).asUInt.orR + val wmask = Mux(s3_req.miss, full_wmask, + Mux(s3_store_hit, s3_store_wmask, + Mux(s3_can_do_amo_write, s3_amo_wmask, + none_wmask))) + need_write_data := VecInit(wmask.map(w => w.orR)).asUInt.orR // generate write data // AMO hits @@ -591,35 +545,20 @@ class MainPipe extends DCacheModule }) } - - // ECC encode data - val s3_wdata_merged = Wire(Vec(blockRows, UInt(encRowBits.W))) - for (i <- 0 until blockRows) { - s3_wdata_merged(i) := Cat((0 until rowWords).reverse map { w => - val wdata = s3_amo_data_merged(i)(wordBits * (w + 1) - 1, wordBits * w) - val wdata_encoded = cacheParams.dataCode.encode(wdata) - wdata_encoded - }) - } - val data_write = io.data_write.bits io.data_write.valid := s3_fire && need_write_data - data_write.rmask := DontCare - data_write.way_en := s3_way_en - data_write.addr := s3_req.addr - data_write.wmask := wmask - data_write.data := s3_wdata_merged - - assert(!(io.data_write.valid && !io.data_write.ready)) + data_write.rmask := DontCare + data_write.way_en := s3_way_en + data_write.addr := s3_req.addr + data_write.wmask := VecInit(wmask.map(_.orR)).asUInt + data_write.data := s3_amo_data_merged // -------------------------------------------------------------------------------- // Writeback // whether we need to write back a block // TODO: add support for ProbePerm // Now, we only deal with ProbeBlock - val miss_writeback = s3_need_replacement && s3_coh === ClientStates.Dirty - // even probe 
missed, we still need to use write back to send ProbeAck NtoN response - // val probe_writeback = s3_req.probe && s3_tag_match && s3_coh.state =/= probe_new_coh.state + val miss_writeback = s3_need_replacement && s3_coh === ClientStates.Dirty val probe_writeback = s3_req.probe val need_writeback = miss_writeback || probe_writeback @@ -631,58 +570,66 @@ class MainPipe extends DCacheModule val writeback_data = s3_coh === ClientStates.Dirty val wb_req = io.wb_req.bits - io.wb_req.valid := s3_valid && need_writeback - wb_req.addr := writeback_addr - wb_req.param := writeback_param + io.wb_req.valid := s3_fire && need_writeback + wb_req.addr := writeback_addr + wb_req.param := writeback_param wb_req.voluntary := miss_writeback - wb_req.hasData := writeback_data - wb_req.data := s3_data_decoded + wb_req.hasData := writeback_data + wb_req.data := s3_data_decoded - stall := io.wb_req.valid && !io.wb_req.ready - when (stall) { - XSDebug("stall\n") - } + s3_fire := s3_valid && (!need_writeback || io.wb_req.ready) && + (!update_meta || io.meta_write.ready) && + (!need_write_data || io.data_write.ready) + + // -------------------------------------------------------------------------------- + // update replacement policy + io.replace_access.valid := RegNext(s3_fire) && (RegNext(update_meta) || RegNext(need_write_data)) + io.replace_access.bits.set := RegNext(get_idx(s3_req.addr)) + io.replace_access.bits.way := RegNext(s3_way_en) + + // -------------------------------------------------------------------------------- + // update replacement policy + io.replace_access.valid := RegNext(s3_fire) && (RegNext(update_meta) || RegNext(need_write_data)) + io.replace_access.bits.set := RegNext(get_idx(s3_req.addr)) + io.replace_access.bits.way := RegNext(s3_way_en) // -------------------------------------------------------------------------------- // send store/amo miss to miss queue val store_amo_miss = !s3_req.miss && !s3_req.probe && !s3_hit && (s3_req.source === STORE_SOURCE.U || 
s3_req.source === AMO_SOURCE.U) - io.miss_req.valid := s3_fire && store_amo_miss - io.miss_req.bits.source := s3_req.source - io.miss_req.bits.cmd := s3_req.cmd - io.miss_req.bits.addr := s3_req.addr + io.miss_req.valid := s3_fire && store_amo_miss + io.miss_req.bits.source := s3_req.source + io.miss_req.bits.cmd := s3_req.cmd + io.miss_req.bits.addr := s3_req.addr io.miss_req.bits.store_data := s3_req.store_data io.miss_req.bits.store_mask := s3_req.store_mask - io.miss_req.bits.word_idx := s3_req.word_idx - io.miss_req.bits.amo_data := s3_req.amo_data - io.miss_req.bits.amo_mask := s3_req.amo_mask - io.miss_req.bits.coh := s3_coh - io.miss_req.bits.id := s3_req.id + io.miss_req.bits.word_idx := s3_req.word_idx + io.miss_req.bits.amo_data := s3_req.amo_data + io.miss_req.bits.amo_mask := s3_req.amo_mask + io.miss_req.bits.coh := s3_coh + io.miss_req.bits.id := s3_req.id // -------------------------------------------------------------------------------- // send response val resp = Wire(new MainPipeResp) - // TODO: add amo data out resp.data := DontCare - resp.id := s3_req.id + resp.id := s3_req.id resp.miss := store_amo_miss resp.replay := io.miss_req.valid && !io.miss_req.ready - io.miss_resp.valid := s3_fire && s3_req.miss - io.miss_resp.bits := resp + io.miss_resp.valid := s3_fire && s3_req.miss + io.miss_resp.bits := resp io.miss_resp.bits.id := s3_req.miss_id - io.store_resp.valid := s3_fire && s3_req.source === STORE_SOURCE.U - io.store_resp.bits := resp + io.store_resp.valid := s3_fire && s3_req.source === STORE_SOURCE.U + io.store_resp.bits := resp - io.amo_resp.valid := s3_fire && s3_req.source === AMO_SOURCE.U - io.amo_resp.bits := resp + io.amo_resp.valid := s3_fire && s3_req.source === AMO_SOURCE.U + io.amo_resp.bits := resp io.amo_resp.bits.data := Mux(s3_sc, s3_sc_resp, s3_data_word) // reuse this field to pass lr sc valid to commit // nemu use this to see whether lr sc counter is still valid io.amo_resp.bits.id := lrsc_valid - - when (io.req.fire()) 
{ io.req.bits.dump() } @@ -719,4 +666,5 @@ class MainPipe extends DCacheModule req.dump() } } + } diff --git a/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala b/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala index 500399408a3..5c51a431a31 100644 --- a/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala +++ b/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala @@ -286,10 +286,6 @@ class LoadUnit extends XSModule with HasLoadHelper { val sqIdxMaskReg = RegNext(UIntToMask(load_s0.io.in.bits.uop.sqIdx.value, StoreQueueSize)) io.lsq.forward.sqIdxMask := sqIdxMaskReg - // use s2_hit_way to select data received in s1 - load_s2.io.dcacheResp.bits.data := Mux1H(io.dcache.s2_hit_way, RegNext(io.dcache.s1_data)) - assert(load_s2.io.dcacheResp.bits.data === io.dcache.resp.bits.data) - XSDebug(load_s0.io.out.valid, p"S0: pc ${Hexadecimal(load_s0.io.out.bits.uop.cf.pc)}, lId ${Hexadecimal(load_s0.io.out.bits.uop.lqIdx.asUInt)}, " + p"vaddr ${Hexadecimal(load_s0.io.out.bits.vaddr)}, mask ${Hexadecimal(load_s0.io.out.bits.mask)}\n")