JIT: Implement greedy RPO-based block layout #101473
@@ -3412,9 +3412,23 @@ bool Compiler::fgReorderBlocks(bool useProfile)
            }
        }

    // If we will be reordering blocks, ensure the false target of a BBJ_COND block is its next block
    if (useProfile)
    {
        if (JitConfig.JitDoReversePostOrderLayout())
        {
            fgDoReversePostOrderLayout();

#ifdef DEBUG
Review comment: You can just remove this whole …
            if (expensiveDebugCheckLevel >= 2)
            {
                fgDebugCheckBBlist();
            }
#endif // DEBUG

            return true;
Review comment: I assume it's unlikely that we won't move blocks here? The other variant tries to keep track, but I'm not sure it is worth the extra bookkeeping. All we are doing by sometimes returning false is speeding up the checked jit a tiny bit.
Reply: That was my reasoning for hard-coding the return value.
        }

        // We will be reordering blocks, so ensure the false target of a BBJ_COND block is its next block
        for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
        {
            if (block->KindIs(BBJ_COND) && !block->NextIs(block->GetFalseTarget()))
@@ -4522,6 +4536,121 @@ bool Compiler::fgReorderBlocks(bool useProfile)
#pragma warning(pop)
#endif

//-----------------------------------------------------------------------------
// fgDoReversePostOrderLayout: Reorder blocks using a greedy RPO of the method's
// main body (i.e. non-EH) successors.
//
// Notes:
//    This won't reorder blocks in the funclet section, if there are any.
//    In the main method body, this will reorder blocks in the same EH region
//    to avoid making any regions non-contiguous.
//
void Compiler::fgDoReversePostOrderLayout()
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In fgDoReversePostOrderLayout()\n");

        printf("\nInitial BasicBlocks");
        fgDispBasicBlocks(verboseTrees);
        printf("\n");
    }
#endif // DEBUG

    // Compute DFS of main function body's blocks, using profile data to determine the order successors are visited in
Review comment: This is now a DFS of everything, right? Not just the main function?
Reply: Yes, updated.
    //
    FlowGraphDfsTree* const dfsTree = fgComputeDfs</* skipEH */ true, /* useProfile */ true>();

    // Fast path: We don't have any EH regions, so just reorder the blocks
    //
    if (compHndBBtabCount == 0)
    {
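        // Note: GetPostOrder(GetPostOrderCount() - 1) is the entry block, so walking the
        // post-order array downward and inserting entry (i - 1) after entry (i) threads the
        // block list into reverse post-order. For example, given a post-order of [D, C, B, A]
        // with A the entry, the loop places B after A, then C after B, then D after C,
        // producing the layout A, B, C, D.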
        for (unsigned i = dfsTree->GetPostOrderCount() - 1; i != 0; i--)
        {
            BasicBlock* const block       = dfsTree->GetPostOrder(i);
            BasicBlock* const blockToMove = dfsTree->GetPostOrder(i - 1);
            fgUnlinkBlock(blockToMove);
            fgInsertBBafter(block, blockToMove);
        }

        return;
    }

    // We have EH regions, so make sure the RPO doesn't make any regions non-contiguous
    // by only reordering blocks within the same region
    //
    for (unsigned i = dfsTree->GetPostOrderCount() - 1; i != 0; i--)
    {
        BasicBlock* const block       = dfsTree->GetPostOrder(i);
        BasicBlock* const blockToMove = dfsTree->GetPostOrder(i - 1);

        if (BasicBlock::sameEHRegion(block, blockToMove))
        {
            fgUnlinkBlock(blockToMove);
            fgInsertBBafter(block, blockToMove);
        }
    }

    // The RPO won't change the entry blocks of any try regions, but reordering can change the last block in a region
    // (for example, by pushing throw blocks unreachable via normal flow to the end of the region).
    // First, determine the new try region ends.
    //
    BasicBlock** const tryRegionEnds = new (this, CMK_Generic) BasicBlock* [compHndBBtabCount] {};

    // If we aren't using EH funclets, fgFirstFuncletBB is nullptr, and we'll traverse the entire blocklist.
    // Else if we do have funclets, don't extend the end of a try region to include its funclet blocks.
    //
    for (BasicBlock* block = fgFirstBB; block != fgFirstFuncletBB; block = block->Next())
    {
        if (block->hasTryIndex())
        {
            tryRegionEnds[block->getTryIndex()] = block;
        }
    }

    // Now, update the EH descriptors
    //
    unsigned  XTnum;
    EHblkDsc* HBtab;
    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        BasicBlock* const tryEnd = tryRegionEnds[XTnum];

        // We can have multiple EH descriptors map to the same try region,
        // but we will only update the try region's last block pointer at the index given by BasicBlock::getTryIndex,
        // so the duplicate EH descriptors' last block pointers can be null.
        // Tolerate this.
        //
        if (tryEnd == nullptr)
        {
            continue;
        }

        // Update the end pointer of this try region to the new last block
        //
        HBtab->ebdTryLast = tryEnd;
        const unsigned enclosingTryIndex = HBtab->ebdEnclosingTryIndex;

        // If this try region is nested in another one, we might need to update its enclosing region's end block
        //
        if (enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
        {
            BasicBlock* const enclosingTryEnd = tryRegionEnds[enclosingTryIndex];

            // If multiple EH descriptors map to the same try region,
            // then the enclosing region's last block might be null in the table, so set it here.
            // Similarly, if the enclosing region ends right before the nested region begins,
            // extend the enclosing region's last block to the end of the nested region.
            //
            if ((enclosingTryEnd == nullptr) || enclosingTryEnd->NextIs(HBtab->ebdTryBeg))
            {
                tryRegionEnds[enclosingTryIndex] = tryEnd;
            }
        }
    }
}

//-------------------------------------------------------------
// fgUpdateFlowGraphPhase: run flow graph optimization as a
// phase, with no tail duplication
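To make the EH handling above concrete, here is a minimal standalone sketch (toy types, made-up post-order and region ids, not JIT code) of the same relinking scheme: it walks a post-order array from the back, moves each entry after its RPO predecessor only when the two share a region (mirroring the BasicBlock::sameEHRegion check), and then records each region's new last block the way the tryRegionEnds pass does.

```cpp
// Toy model: each block has a name and the id of its innermost try region (-1 = none).
#include <algorithm>
#include <cstdio>
#include <initializer_list>
#include <iterator>
#include <list>
#include <map>
#include <string>
#include <vector>

struct ToyBlock
{
    std::string name;
    int         region; // -1 means "not inside any try region"
};

int main()
{
    ToyBlock a{"A", -1}, t1{"T1", 0}, t2{"T2", 0}, b{"B", -1}, c{"C", -1};

    // Block list before reordering.
    std::list<ToyBlock*> layout = {&a, &t1, &t2, &b, &c};

    // Post-order of a hypothetical profile-aware DFS; the entry block A sits at the
    // highest index, so reading the array from the back gives the RPO: A, T1, T2, C, B.
    std::vector<ToyBlock*> postOrder = {&b, &c, &t2, &t1, &a};

    for (size_t i = postOrder.size() - 1; i != 0; i--)
    {
        ToyBlock* const block       = postOrder[i];
        ToyBlock* const blockToMove = postOrder[i - 1];

        // Mirror of the sameEHRegion check: only move a block next to its RPO
        // predecessor when both live in the same region.
        if (block->region != blockToMove->region)
        {
            continue;
        }

        layout.remove(blockToMove);
        layout.insert(std::next(std::find(layout.begin(), layout.end(), block)), blockToMove);
    }

    // Recompute each region's last block from the new layout, as the tryRegionEnds pass does.
    std::map<int, ToyBlock*> regionEnds;
    for (ToyBlock* const blk : layout)
    {
        if (blk->region != -1)
        {
            regionEnds[blk->region] = blk;
        }
    }

    for (ToyBlock* const blk : layout)
    {
        printf("%s ", blk->name.c_str());
    }
    printf("\nlast block of region 0: %s\n", regionEnds[0]->name.c_str());
    return 0;
}
```

Running this prints the layout A T1 T2 C B with T2 recorded as the last block of region 0: the cold block B is moved after C, while the region check leaves blocks from other regions where they are, which is what keeps each try region contiguous after the reorder.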