/* EPC Imp to C Translation Release 4 Version Apr 95 */
#include "imptoc.h"
/* 14 Jun 96 Propagated the GLOBAL LAB FRAG into finalfrag in initfrag */
/* In case GIS does clever stuff and removes non global labs */
/* which moves the Global label onto finalfrag */
/* Added 20.3 correction to Bupdateliveness */
/** bprocs20 */
/** 22 Feb 96 Added some ad hoc code for Pentium dual issue which defeats */
/**           the standard model as the unit (pipe) depends on the instr */
/**           operands as well as the opcode. Also the nonstandard pairing */
/**           of 2 POPs, 2 PUSHes or cmp-jmp defeats the model */
/** bprocs19.9 */
/** 23 Oct 95 Took more care over PI_fragsize, decrementing it when an instr */
/**           is removed or deleted since C++ had an empty frag claiming to */
/**           hold 150 instrs! (pds) */
/** bprocs19.8 */
/** 25 Jul 95 Corrected conversion of double opcodes (POWER opcodes) */
/**           Added check in Bnewinstr that there is a lastinstr */
/** bprocs19.7 */
/**           Allowed repeated reorganisations. Needed after some */
/**           peepholing which can only be done after the first reorg */
/**           but may create opportunities for further reorganisation */
/** bprocs19.6 */
/** 01 Jun 95 Added an extra reorganisation to duplicate small frags */
/**           that are unconditionally jumped to */
/** bprocs19.5 */
/** 23 May 95 Added Reorganisation 10 to Reorgfrag (pds) */
/** bprocs19.4 */
/** 08 Apr 95 .Routine BReinitialise for C++ to use for template fns (CDM)*/
/** bprocs19.3 */
/** 26 Jan 95 Fixed B get opcode for POWER opcodes that use the bottom */
/**           bit as a discriminator */
/** bprocs19.2.i*/
/** 05 Jul 94 .Fixed B SetEntryLabel to maintain the label to a return */
/**           fragment. Also set _preds field to 0 when undefined. (rt) */
/** bprocs19.1.i*/
/** 24 Jun 94 .Added error for undefined label and fixed problem with side */
/**           entries in reorganisation 9. (rt) */
/** bprocs19.i */
/** 10 May 94 .Enable use of latest GIS sources on MIPS. Also reintroduce */
/**           PENTIUM specs which had been lost. Fixed CountFrag to handle */
/**           case where dense switch in same fragment as switch jump. */
/**           Removed incorrect FALLS THROUGH fix to reorganisation 1. */
/**           Prevent deletion of the return fragment label. Make deletion */
/**           of labels the first reorganisation. Fix problem with */
/**           PerformIfThenElseOpt when nothing follows the else fragment. */
/**           Changes to handle dynamic TargetVariant. (rt) */
/** bprocs18.2.3.i*/
/** 09 May 94 .Removed B SetArchInfo and ARCH PrintArchInfo as these are now */
/**           redundant. (rt) */
/** bprocs18.2.2.i*/
/** 25 Apr 94 .Changed _final setting to ensure that fragments which */
/**           are entry points are always final fragments. Also changed */
/**           use of FIRST OF PAIR and SECOND OF PAIR to use new */
/**           BARRIER INSTR flag. (rt) */
/** bprocs18.2.1.i*/
/** 20 Apr 94 .Changed definition of B CommitInstr. (rt) */
/** bprocs18.2.i*/
/** 18 Mar 94 .Reintroduced the routine B CurFragAddr. Changed definition */
/**           of B SetGISOptions. (rt) */
/** bprocs18.1.i*/
/** 16 Mar 94 .Changed B MoveDelaySlot to not update register liveness */
/**           information. (rt) */
/** bprocs18.i */
/** 21 Feb 94 .Prevented filling delay slots with instructions marked */
/**           with NON ANNUL INSTR private property. (rt) */
/** bprocs17.i */
/** 06 Jan 94 .Removed uses of _switchid field of fragfmt and used */
/**           _switchtab instead. Also fixed problem with ADJACENT LOOPS */
/**           context. Changed flags passed to ARCH DisasInstr. (rt) */
/** bprocs16.i */
/** 11 Oct 93 .Merge of MIPS and USL C M88K code generators.
Removed the */ /** redundant routines B UnhookFrag, B ClearFrag, B CurFragAddr and*/ /** B CurFragInstr. (rt) */ /**---------------------- START OF USL C M88K CHANGES --------------------------*/ /** bprocs4.3.i*/ /** 06 Oct 93 .B SetOptions included. (md) */ /** bprocs4.2.i*/ /** 21 Apr 93 .Initialised variable rs1 in SetupSchDescriptors. (pds) */ /**----------------------- END OF USL C M88K CHANGES ---------------------------*/ /** bprocs15.2.i*/ /** 14 Oct 93 .Updated fragment, instruction and predecessor descriptor */ /** allocation mechanism to handle multiple procedure levels. */ /** Made call to GIS code conditional on BUILD WITH GS flag */ /** being non-zero. (rt) */ /** bprocs15.1.i*/ /** 27 Aug 93 .Added B CopyFrag function for fragment duplication. Changed */ /** B SetEntryLabel to allow label to be defined more than once. (rt)*/ /** bprocs15.i */ /** 06 Aug 93 .Improved tracing so that memory allocation tracing is */ /** only printed for a selected procedure. Allowed coalescing */ /** of return fragment with immediately preceeding fragment. */ /** Added tracing for label deletion. Removed verbose parameter from*/ /** B TraceFrag and B TraceInstr. Added B ReconvergeLiveness */ /** for redoing register liveness information and remade liveness */ /** information after global scheduling. Added minor fix for */ /** F90 switch label deletion. Added fix to fragment */ /** reorganisation 1 for fall through fragments. (rt) */ /** bprocs14.i */ /** 25 Jul 93 .Added extra fragment tracing stage after delay slot */ /** handling after global scheduling. Fixed problem in */ /** if/then/else optimisation where then label reference */ /** instruction not being reset. Added new B TraceLabel */ /** routine for global scheduler. Also fixed B GetFixedLabel which*/ /** did not handle cleared labels correctly. (rt) */ /** bprocs13.i */ /** 25 Jul 93 .Fixed problem with reorganisation 8 which didn't always */ /** ensure that there was a control flow path between following */ /** fragments. Updated housekeeping for reorganisation 9 which */ /** resets fallen into and falls through flags for redundant */ /** fragments. (rt) */ /** bprocs12.i */ /** 21 Jul 93 .Fixed reorganisation 9 to disallow coalescing if any */ /** fragments in the block to be moved have ther NO COALESCE TAIL */ /** property set. Added facility to fill malloced blockes with */ /** a predefined pattern when allocated and again when freed. Also*/ /** fixed reorganisation 7 to reset return branch flag when */ /** appropriate. (rt) */ /** bprocs11.i */ /** 20 Jul 93 .Fixed reorganisations 6 and 7 to propagate return branch */ /** fragment flag. Fixed B MoveDelaySlot to set fragment */ /** fallen into/fallen out of properly when creating a new */ /** fragment. (rt) */ /** bprocs10.i */ /** 16 Jul 93 .Fixed problem with invalid coalescing of dense switch */ /** table fragments. (rt) */ /** bprocs9.i */ /** 15 Jul 93 .Modified B TraceFunction, B TraceFrag and B TraceInstr to */ /** flush the output buffer and select stdout before and after */ /** generating tracing. (rt) */ /** bprocs8.i */ /** 10 Jun 93 .Merge of MIPS and SPARC versions for port of GIS to */ /** MIPS. The redundant store elimination routines have been */ /** removed as they are longer considered to be generic enough */ /** to be included in this module. They will have to be moved */ /** into an architecture specific module if this file is taken */ /** back to VME or ported to Intel (where the redundant store */ /** elimination is likely to be useful). 
(rt) */ /**--------------------------- START OF MIPS CHANGES --------------------------*/ /** bprocs4.4 */ /** 28 May 93 .Improved coalescing of frags. (pds) */ /** bprocs4.3 */ /** 24 May 93 .Changes to improve deletion of unused labels. (pds) */ /** bprocs4.2 */ /** 16 Apr 93 .Minor changes when checking for procedure calls used */ /** to test returnreg (result) for link. (pds) */ /** bprocs4.1 */ /** 20 Mar 93 .Minor change to activate reguse integration for m88k. (pds) */ /** .Also really check the context for noalias in stead of having */ /** a comment to that effect but doing NOWT!. (pds) */ /** bprocs4 */ /** 13 Mar 93 .Reference point prior to 88110 dev. (pds) */ /** bprocs2.9 */ /** 12 Nov 92 .Additional conditional compilation for MIPs - */ /** includes improvements to stall factors where an instruction */ /** has a result used more than once the larger dependency is used*/ /** bprocs2.7 */ /** 20 Oct 92 .Changes to compute stall factor by following chains of */ /** dependents rather than estimating. Also improving the SPARC model*/ /** to allow for loads stealing the bus so less fillers needed than*/ /** were inserted by previous version. An unassigned variable was */ /** removed from SetupSchDescriptors. (pds) */ /** bprocs2.3 */ /** 30 Sep 92 .Changes to bookwritebackslot as most control registers */ /** do not require a writeback slot. (pds) */ /**---------------------------- END OF MIPS CHANGES ---------------------------*/ /** bprocs7.i */ /** 03 Mar 93 .Changed btracefrag and btraceinstr to handle NULL pointers */ /** being passed in. (rt) */ /** 03 Mar 93 .Common point for start of M88K GIS port. (rt) */ /** bprocs6.i */ /** 17 Feb 93 .Merge in changes from version 8.2.A of USL SPARC C code */ /** generator. (rt) */ /** bprocs5.i */ /** 07 Dec 92 .Various changes related to changes in the functional interface*/ /** with the global scheduler. (rt) */ /** bprocs4.i */ /** 30 Sep 92 .Merged in changes from USL/EPC SPARC C compiler prior to */ /** global scheduling development. (rt) */ /** bprocs3.i */ /** 29 Sep 92 .Merge of VME and RS/6000 versions of bprocs prior to global */ /** scheduling implementation. Some RS/6000 changes have been */ /** omitted from the merge since they introduce architecture */ /** specific tests into this module which is designed to be */ /** architecture independent. Most of these can be directly handled*/ /** by calls to functions in the architecture specific modules. */ /** However, future RS/6000 code generators using this module */ /** should set the TOC reload after a call as a delay slot */ /** instruction prior to calling this module for basic block */ /** scheduling. (rt) */ /***/ /**------------------------- START OF RS/6000 CHANGES --------------------------*/ /** 01 Sep 92 .Changes to CalculateClockSchedule for cases where two */ /** instructions are dispatched in the same cycle. Also improved */ /** calculation of stall factors for instructions producing output*/ /** not used in the current fragment. The uses are assumed to be */ /** in the first instruction in the successor fragment. (pds) */ /** 18 Jun 92 .B obtain frag was added. (pds) */ /** 22 Apr 92 .Flagged the RS/6000 TOC reload as a delay slot instruction */ /** so that it is not separated from the call. This is required */ /** for linker glueing. (pds) */ /** 13 Apr 92 .Fixed GetCandidateGroup to ignore def/def dependencies on */ /** on a certain set of registers. This is used to exempt the carry*/ /** bit on the RS/6000 from the write data hazard test. 
(rt) */ /**-------------------------- END OF RS/6000 CHANGES ---------------------------*/ /***/ /** bprocs1.10.1*/ /** 28 Jul 92 .Fixed reoragnisation 7 to ensure that there is always a */ /** fall through path from the inverted jump to the original */ /** jump destination. (rt) */ /** bprocs1.10 */ /** 17 Jul 92 .Fixed reorganisation 7 to correctly set the final field of */ /** the fragment containing the jump which is deleted. (rt) */ /** bprocs1.9 */ /** 22 Jun 92 .Fixed GenFullList to ensure resultant list is in ascending */ /** order. (rt) */ /** bprocs1.8 */ /** 30 Apr 92 .Fixed DoRedunStoreElim to clear the in, out, use and def */ /** lists for local accesses. (rt) */ /** bprocs1.7 */ /** 10 Apr 92 .Modified this module to work with the new vmeopt module. (rt) */ /** bprocs1.6 */ /** 09 Apr 92 .Improved fragment reorganisation 7 to handle empty fragments */ /** after the unconditional jump. (rt) */ /** 30 Mar 92 .Increased the maximum number of blocks in the redundant */ /** store analysis to 1024. (rt) */ /** 30 Mar 92 .Fixed DescMemOverlap to call architecture specific overlap */ /** testing function in all cases. (rt) */ /** 30 Mar 92 .Updated use of delay slot flags to use new DELAY SLOT EXISTS */ /** and DELAY SLOT UNUSED properties. (rt) */ /** 25 Mar 92 .Fixed fragment reorganisation 9 to work in case where */ /** destination fragment which is to be coalesced contains */ /** no instructions. (rt) */ /** 24 Mar 92 .Improved InitFrag to look through the instruction stream */ /** and delete any redundant BLOCKMARKs or LINENUMBERs. (rt) */ /** bprocs1.5 */ /** 18 Mar 92 .Fixed fragment reorganisation 5 so that if any member of */ /** the loop is not fallen into the optimisation is aborted. (rt) */ /** 17 Mar 92 .Added a buddy memory allocator to this module in order to */ /** reduce the number of calls made to the system malloc. This */ /** allocator grabs 128K chunks of memory. (rt) */ /** 13 Mar 92 .Added a B IsAliased function. (rt) */ /** 09 Mar 92 .Removed all uses of ItoS from this module. (rt) */ /** 06 Mar 92 .Renamed CheckIfUnaliased to B IsUnaliased and made it an */ /** externally available function. (rt) */ /** 01 Mar 92 .Fixed fragment reorganisation 8 to ensure that there is a */ /** marked fall through path between the old jump position and */ /** the destination. (rt) */ /** bprocs1.4 */ /** 19 Feb 92 .Added support for redundant store elimination. This uses a */ /** liveness analysis scheme similiar to that used for registers */ /** except that it is applied with respect to stack frame */ /** locations. The liveness information is calculated using the */ /** data flow analysis algorithm detailed on page 632 of Aho, */ /** Sethi and Ullman (Dragon book). (rt) */ /** 08 Jan 92 .Changed tracing mechanism. The routine B SetReportOn, */ /** B SetReportOff, B SetMemTraceOn and B SetMemTraceOff have been*/ /** deleted and replaced by a tracing enable bit vector passed to */ /** B Initialise. Changed malloc arrangement in the rescheduler */ /** so that it makes fewer calls. Improved memory allocation for */ /** fragments so that a free fragment record list is maintained in*/ /** a similiar fashion to instruction records. (rt) */ /** bprocs1.3.1*/ /** 09 Dec 91 .Fixed bug in fragment reorganisation 9 which left inconsistent*/ /** final pointers. Also fixed reorganisation 6 to handle case */ /** where destination is an empty fragment. (rt) */ /** bprocs1.3 */ /** 29 Nov 91 .Changed bannerline to be less than 80 characters long. 
Fixed */ /** SetupSchDesc to divide VMEPLI opcodes by 2 before indexing */ /** timing table. In CalculateClockSchedule, make advancement of */ /** of clock time by the latency dependent on rescheduling for a */ /** superscalar architecture. Add MEM OVERLAP STALL to take account*/ /** of extra delays imposed by aliases in the memory access */ /** hardware. Changed GetCandidateGroup to take account of register*/ /** liveness when calculating instruction dependencies. Changed */ /** B Locate Label to return label hash values in the range */ /** 1 to 32. */ /***/ /** Introduced calls to SetLabelRefs. This is used to count */ /** references to labels that would otherwise be invisible to the */ /** fragment reorganiser. */ /***/ /** Added code to reduce the reference count of destination */ /** fragments when a fragment containing dense switch jump code */ /** is deleted. */ /***/ /** Renamed fragbusy field in current procedure record to fragsize.*/ /** This gives an approximate count of the number of instructions in*/ /** a fragment. This may be used by the architecture specific module*/ /** to decide when to break fragments if they become unreasonably */ /** large. */ /***/ /** Changed GetCandidateGroup so that the maximum stall factor */ /** obtained for a biased descriptor is MAX STALL BIAS rather than 0.*/ /***/ /** Added a TSAREG base register. This is a pseudo base register */ /** for accessing a temporary storage area. On VMEPLI this */ /** corresponds to TOS. This is required to make memory accesses to*/ /** TOS distinct from those to the STACK area. */ /***/ /** Changed B new frag to chain the new fragment into the current */ /** point in the fragment chain. */ /***/ /** Added concept of an alternative latency to be associated with */ /** each instruction. This is used on VMEPLI to differentiate the */ /** different uses of the B register, either for arithmetic or as */ /** an addressing register. There is a different latency depending*/ /** on the subsequent use of the register. */ /***/ /** Added B Terminate routine to free dynamic memory allocated by the*/ /** backend optimiser to instructions, fragments and labels. (rt) */ /***/ /** bprocs1.2 */ /** 18 Oct 91 .Renamed uses of SWITCH TABLE FRAG to DSWITCH JUMP FRAG. */ /** Added routines to do live variable analysis of fragments in */ /** fragment reorganisation stage. The external routine */ /** B LiveSetAfterInstr now returns the set of live registers after*/ /** a given instruction in a fragment. The routine B UpdateLiveRegs*/ /** is used to update the liveness information after an optimisation*/ /** has been applied to a specific fragment. The liveness information*/ /** is calculated using the data flow analysis algorithm detailed on*/ /** page 632 of Aho, Sethi and Ullman (Dragon book). (rt) */ /***/ /** bprocs1.1 */ /** 14 Oct 91 .Added routine B TerminateFrag to end a fragment with a */ /** given label. Also changed storage management so that calls */ /** from this module are redirected to handlers in the architecture*/ /** specific module. (rt) */ /** bprocs1 */ /** 13 Sep 91 .This module was created from the fragment reorganisation */ /** and instruction rescheduling routines in m88kprocs60. This */ /** module is designed to contain all generic back end optimisation*/ /** code. This will be common to both the SPARC and M88K */ /** architectures and eventually to VME and 386. The code in this */ /** module operates upon the fragment and instruction chains */ /** generated by architecture specific modules. 
The instruction, */ /** fragment and label list handling from mprocs has been relocated*/ /** to this module. In order to maintain the call structure of the*/ /** code generator and the applicabilty of this module to non */ /** Emachine 4 code generators, no imports should be made except to*/ /** the architecture specific code module (m88kprocs, sprocs, */ /** vmeprocs, etc). Routines in mprocs or cprocs should NOT be */ /** called directly (only via a handler in the architecture code */ /** module). (rt) */ /***/ /********************************************************************************/ /***/ #define modulename ("bprocs") /***/ /**************************************************************************/ /** **/ /** DEFINITION FILE INCLUDES **/ /** **/ /**************************************************************************/ /***/ #include "cgtarget.h" /***/ #include "boconsts.h" /***/ #include "archdefs.h" /***/ #include "archsched.h" /***/ /***/ /**************************************************************************/ /** **/ /** GLOBAL VARIABLES AND CONSTANTS **/ /** **/ /**************************************************************************/ /***/ #define MAXITERATIONS 25 #define SENSIBLEITER 100 #define FREEFRAGMARKER (0x42424242) #define FREEINSTRMARKER (0x43434343) #define FREEPREDMARKER (0x45454545) #define EMPTYSPACEMARKER (0x47474747) /***/ /** Should be non-zero to force allocated memory chunks to be filled with */ /** a known pattern when allocated and again when freed - useful for tracking*/ /** down problems caused by non-initialisation of dynamic structures. */ #define FILLMALLOCSPACE 0 /***/ /** Constants for ValidLive flag */ #define NOLIVENESSINFO 0 /* no liveness information created yet */ #define RWLIVENESSINFO 1 /* valid liveness info that may be updated */ #define ROLIVENESSINFO 2 /* vestigal liveness info remains but */ /* fragment commitment has modified topology */ /* so modification of liveness is impossible */ /***/ #define bannerline ("---------------------------------------") /***/ #define ublabrecheads 255 /***/ /** Chains for free and allocated fragments, instructions and predecessor */ /** descriptors. There is a separate chain for every possible active procedure*/ /** level. 
*/ static int fragalloc [20]; /* head of allocated fragment chains */ static int instralloc [20]; /* head of allocated instruction chains */ static int predalloc [20]; /* head of allocated predecessor desc chains */ static int freefrag [20]; /* the free fragment record chains */ static struct instrfmt * freeinstr [20]; /* the free instruction record chains */ static int freepred [20]; /* the free predecessor descriptor chains */ /***/ static int CurArena; /* the current arena being allocated */ static int fragno; /* frag index used during diagnostic printing */ static int changes; /* counts changes made during one iteration */ static int FragReport; /* set if fragment tracing is to be output */ static int GISFragReport; /* set for pre and post GIS fragment tracing */ static int ExtFragReport; /* set if extended fragment tracing to be gen */ static int ExtInstrTrace; /* set if extended fragment instruction tracing */ static int EntryTrace; /* print procedure names and numbers */ static int SchReport; /* set if basic block scheduler tracing reqd */ static int VerSchReport; /* set for verbose basic block scheduler trace */ static int ValidPreds; /* set if fragment predecessor edge lists valid */ static int RegionLabel; /* the current GIS unique region label */ static int Commiting; /* set if fragment commitment is being done */ static int LastCommitFrag; /* last fragment to be committed in chain */ static int CommitRegion; /* the label of the region being committed */ static int CommitProps; /* the properties of the region being committed */ static int ArchVariant; /* architecture variant for backend opt */ static int GISBuffZone; /* buffer zone size for global scheduler */ static int GISFloatOpt; /* float optimisation flag for global scheduler */ static int GISUnsafeOpt; /* unsafe optimisation level for global sched */ static int GISTrace; /* trace vector for global scheduler */ static int GISInhib; /* inhibit vector for global scheduler */ static int GISRegion; /* the GIS region for tracing/inhibition */ static int GISAttempt; /* the GIS schedule attempt for tracing/inhib */ static int GISPass; /* the GIS schedule pass for tracing/inhibition */ static int ValidLive; /* set if the liveness information is valid */ static int BlkTrace; /* set if memory alloc block tracing enabled */ static int MemTrace; /* set if frag and instr alloc tracing enabled */ static int BuddyTrace; /* set if tracing of buddy allocator required */ static int nextlab; /* the next free label number */ static int InhibVect; /* bit vector giving optimisations to inhibit */ static int RangeProc; /* procedure number of inhibition/tracing */ static int RangeFragLow; /* lower end of frag range to apply inhib/trace */ static int RangeFragHigh; /* upper end of frag range to apply inhib/trace */ static int CurInhibVect; /* the current inhibit vector */ static int TraceEnab; /* tracing enable flag */ static int BeenReorg; /* set if fragments have been reorganised */ /***/ static int labrecheads [ublabrecheads+1]; /***/ static struct procfmt *PI; /* info record for the current procedure */ /***/ /***/ /**************************************************************************/ /** **/ /** IMPORTS **/ /** **/ /**************************************************************************/ /***/ #if(Target==M88K) #define ARCHMalloc m88kmalloc extern int ARCHMalloc( int ); #define ARCHFree m88kfree extern void ARCHFree( int ); #define ARCHDisasInstr m88kdisasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define 
ARCHPrintInstr m88kprintinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap m88kmemoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot m88kfilldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHPrintArchInfo m88kprintarchinfo extern void ARCHPrintArchInfo( int ); #define ARCHDeleteSwitch m88kdeleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch m88kimproveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn m88kimprovereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch m88kprintswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters m88kprintregisters extern void ARCHPrintRegisters( int ,int ,int ); #define ARCHPrintEntryName m88kprintentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP m88kcreatenop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond m88kcreateuncond extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum m88kaskforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput m88kflushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch m88kinvertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); #define ARCHSetLabelRefs m88ksetlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch m88kconditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel m88kisaddrlabel extern int ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots m88khasannullableslots extern int ARCHHasAnnullableSlots( void ); ; #endif; /***/ #if(Target==SPARC) #define ARCHMalloc sparcmalloc extern int ARCHMalloc( int ); #define ARCHFree sparcfree extern void ARCHFree( int ); #define ARCHDisasInstr sparcdisasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define ARCHPrintInstr sparcprintinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap sparcmemoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot sparcfilldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHPrintArchInfo sparcprintarchinfo extern void ARCHPrintArchInfo( int ); #define ARCHDeleteSwitch sparcdeleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch sparcimproveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn sparcimprovereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch sparcprintswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters sparcprintregisters extern void ARCHPrintRegisters( int ,int ,int ,int ); #define ARCHPrintEntryName sparcprintentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP sparccreatenop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond sparccreateuncond extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum sparcaskforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput sparcflushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch sparcinvertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); #define ARCHSetLabelRefs sparcsetlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch sparcconditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel sparcisaddrlabel extern int 
ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots sparchasannullableslots extern int ARCHHasAnnullableSlots( void ); #endif; /***/ #if(Target==RS6) #define ARCHMalloc rs6malloc extern int ARCHMalloc( int ); #define ARCHFree rs6free extern void ARCHFree( int ); #define ARCHDisasInstr rs6disasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define ARCHPrintInstr rs6printinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap rs6memoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot rs6filldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHPrintArchInfo rs6printarchinfo extern void ARCHPrintArchInfo( int ); #define ARCHDeleteSwitch rs6deleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch rs6improveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn rs6improvereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch rs6printswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters rs6printregisters extern void ARCHPrintRegisters( int ,int ,int ); #define ARCHPrintEntryName rs6printentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP rs6createnop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond rs6createuncond extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum rs6askforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput rs6flushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch rs6invertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); #define ARCHSetLabelRefs rs6setlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch rs6conditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel rs6isaddrlabel extern int ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots rs6hasannullableslots extern int ARCHHasAnnullableSlots( void ); ; #endif; /***/ #if(Target==MIPS) #define ARCHMalloc mipsmalloc extern int ARCHMalloc( int ); #define ARCHFree mipsfree extern void ARCHFree( int ); #define ARCHDisasInstr mipsdisasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define ARCHPrintInstr mipsprintinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap mipsmemoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot mipsfilldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHDeleteSwitch mipsdeleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch mipsimproveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn mipsimprovereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch mipsprintswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters mipsprintregisters extern void ARCHPrintRegisters( int ,int ,int ); #define ARCHPrintEntryName mipsprintentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP mipscreatenop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond mipscreateuncond extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum mipsaskforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput mipsflushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch mipsinvertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); 
#define ARCHSetLabelRefs mipssetlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch mipsconditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel mipsisaddrlabel extern int ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots mipshasannullableslots extern int ARCHHasAnnullableSlots( void ); ; #endif; /***/ #if(Target==VMEPLI) #define ARCHMalloc vmemalloc extern int ARCHMalloc( int ); #define ARCHFree vmefree extern void ARCHFree( int ); #define ARCHDisasInstr vmedisasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define ARCHPrintInstr vmeprintinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap vmememoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot vmefilldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHPrintArchInfo vmeprintarchinfo extern void ARCHPrintArchInfo( int ); #define ARCHDeleteSwitch vmedeleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch vmeimproveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn vmeimprovereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch vmeprintswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters vmeprintregisters extern void ARCHPrintRegisters( int ,int ,int ); #define ARCHPrintEntryName vmeprintentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP vmecreatenop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond vmecreateuncond extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum vmeaskforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput vmeflushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch vmeinvertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); #define ARCHSetLabelRefs vmesetlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch vmeconditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel vmeisaddrlabel extern int ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots vmehasannullableslots extern int ARCHHasAnnullableSlots( void ); ; #endif; /***/ #if(Target==PENTIUM) #define ARCHMalloc p5malloc extern int ARCHMalloc( int ); #define ARCHFree p5free extern void ARCHFree( int ); #define ARCHDisasInstr p5disasinstr extern void ARCHDisasInstr( struct instrfmt *,int *); #define ARCHPrintInstr p5printinstr extern void ARCHPrintInstr( struct instrfmt *); #define ARCHMemoryOverlap p5memoryoverlap extern int ARCHMemoryOverlap( struct instrfmt *,struct instrfmt *,int ); #define ARCHFillDelaySlot p5filldelayslot extern void ARCHFillDelaySlot( struct fragfmt *,struct instrfmt *); #define ARCHPrintArchInfo p5printarchinfo extern void ARCHPrintArchInfo( int ); #define ARCHDeleteSwitch p5deleteswitch extern void ARCHDeleteSwitch( struct fragfmt *); #define ARCHImproveSwitch p5improveswitch extern int ARCHImproveSwitch( struct fragfmt *); #define ARCHImproveReturn p5improvereturn extern int ARCHImproveReturn( struct fragfmt *); #define ARCHPrintSwitch p5printswitch extern void ARCHPrintSwitch( struct fragfmt *); #define ARCHPrintRegisters p5printregisters extern void ARCHPrintRegisters( int ,int ,int ,int ); #define ARCHPrintEntryName p5printentryname extern void ARCHPrintEntryName( int ); #define ARCHCreateNOP p5createnop extern int ARCHCreateNOP( void ); #define ARCHCreateUncond p5createuncond 
extern int ARCHCreateUncond( int ); #define ARCHAskForLabelNum p5askforlabelnum extern int ARCHAskForLabelNum( void ); #define ARCHFlushOutput p5flushoutput extern void ARCHFlushOutput( int ); #define ARCHInvertBranch p5invertbranch extern int ARCHInvertBranch( struct instrfmt *,int ,int ); #define ARCHSetLabelRefs p5setlabelrefs extern void ARCHSetLabelRefs( struct fragfmt *); #define ARCHConditionMatch p5conditionmatch extern int ARCHConditionMatch( int ,int ); #define ARCHIsAddrLabel p5isaddrlabel extern int ARCHIsAddrLabel( int ); #define ARCHHasAnnullableSlots p5hasannullableslots extern int ARCHHasAnnullableSlots( void ); #endif; /***/ #if(BUILDWITHGS!=0) extern void GlobalSchedule( int ,int ,int ,int ,int ); ; #endif; /***/ /***/ /***/ /**************************************************************************/ /** **/ /** LOCAL SPECIFICATIONS **/ /** **/ /**************************************************************************/ /***/ static void SetVectors( int ); static void DoReschedFrag( int ); static void FreeSchDescriptors( void ); static void ClaimSchDescriptors( int ,int ); static void WalkFrags( void ()); static void WalkFinalFrags( void ()); void BPrintFrags( char * ); static void PrintFragInfo( struct fragfmt *); static void InitFrag( struct fragfmt *); static void CountFrag( struct fragfmt *); static void InitLiveRegAnalysis( struct fragfmt *); static void DoLiveRegAnalysis( struct fragfmt *); static void PrintFrag( struct fragfmt *); static void ReorgFrag( struct fragfmt *); static void GenPredecessors( struct fragfmt *); static void SchedProc( int ); static void PerformIfThenOpt( struct fragfmt *); static void PerformIfThenElseOpt( struct fragfmt *,int ); static void PerformUncondBranchOpt( struct fragfmt *,int ); static void PerformBranchOverOpt( struct fragfmt *,int ); void BBreakFrag( int ); struct instrfmt *BMachineInstr( struct instrfmt * ,int ); void BMoveBefore( struct fragfmt * ,struct fragfmt * ,struct instrfmt * ,struct instrfmt * ); void BMoveAfter( struct fragfmt * ,struct fragfmt * ,struct instrfmt * ,struct instrfmt * ); struct fragfmt*BPrevFrag( struct fragfmt *); static void MakeFragSkeletal( struct fragfmt *); void BUpdateLiveRegs( struct fragfmt *); void BLiveSetAfterInstr( struct fragfmt *,struct instrfmt *,int *,int *,int *,int *); static void AddPredFrag( int ,int ,int ,struct fragfmt *,struct fragfmt *); /***/ /***/ /********************************************************************************/ /***/ /** ERROR HANDLING **/ /********************************************************************************/ /***/ static void BError(char * ErrStr) { /****************************************************************/ /** Terminates the backend optimiser with an internal error **/ /****************************************************************/ ARCHFlushOutput(0); #if(Target!=VMEPLI) /* ****call on untranslateable imp support routine**** */; #endif; printf("\n***** GENERIC CODE OPTIMISER INTERNAL ERROR -\n %s\n\n",ErrStr); imp_stop(); } /*B Error*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/
/**/
/***/
/********************************************************************************/
/**                           STORAGE MANAGEMENT                               **/
/********************************************************************************/
/***/
/** This section implements an exponential buddy allocator to give an interface */
/** between the memory requests made by the backend optimiser and the dynamic   */
/** memory system on the targeted architecture. This reduces the number of      */
/** calls made to this system and thus improves compile time performance.       */
/***/
/** An exponential buddy allocator makes best use of the available storage      */
/** when given requests for memory areas with sizes just less than or exactly   */
/** powers of two. To this end, all calls of the allocator from the backend     */
/** should make their requests as close to powers of two in size as             */
/** possible.                                                                    */
/***/
#define dummy1forcb 0
/***/
/** The buddy allocator grabs its memory from the system in chunks of           */
/** BUDDY CHUNK SIZE. Each chunk contains two pyramidal bit vectors called      */
/** free and alloc. When a particular bit is set in the free vector then the    */
/** corresponding block may be returned in response to an allocation request.   */
/** When a particular bit is set in the alloc bit vector then the corresponding */
/** block is currently allocated to the backend optimiser. The minimum          */
/** block size is determined by BUDDY MIN POWER.                                */
/***/
/** The pyramidal bit vectors are composed of a number of levels between        */
/** BUDDY MIN POWER and BUDDY MAX POWER inclusive. Each ascendant bit           */
/** vector is half the size of the previous one. The structure of the           */
/** pyramid elucidates the buddy relationship. Each bit at level n              */
/** corresponds to two buddy bits at level n-1. At the top level,               */
/** BUDDY MAX POWER, there is only a single bit which represents a block        */
/** of size BUDDY MAX ALLOC. At the level below that there are two bits         */
/** corresponding to two blocks of size BUDDY MAX ALLOC/2 and so on until       */
/** we reach level BUDDY MIN POWER with blocks of size 2^BUDDY MIN POWER.       */
/***/
#define dummy2forcb 0
/** Requests for areas of size greater than BUDDY MAX ALLOC are passed          */
/** directly to the system malloc and an individual chunk for the area          */
/** is created.                                                                  */
/***/
/** An overview of the buddy algorithm is as follows:                           */
/***/
/** To allocate a block:                                                         */
/** (1) The smallest free block of storage that is at least as big as the       */
/**     request is selected as the candidate block.                             */
/** (2) The candidate is checked for size and, if large enough, is split into   */
/**     two smaller blocks (buddies); otherwise the candidate block is          */
/**     returned as the block satisfying the request.                           */
/** (3) One of the buddies (the one with the lower address) is selected         */
/**     as the new candidate and the other is inserted in the free storage      */
/**     pool. The algorithm proceeds to step (2).                               */
/***/
/** To free a block:                                                             */
/** (1) The buddy of the newly freed block is located.                          */
/** (2) The buddy is inspected to see that it is whole (not split into          */
/**     subbuddies) and free. If both conditions are met, then the buddy        */
/**     is removed from free storage and merged with the newly freed block      */
/**     to create a larger block. This larger block is then taken to be the     */
/**     newly freed block and the execution proceeds with (1).                  */
/** (3) If it is impossible to merge, then the newly freed block is returned    */
/**     to the free storage area.                                               */
/***/
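/** The following sketch is illustrative only: it models the split and merge    */
/** arithmetic described above on byte offsets within a chunk, assuming that a  */
/** block of size 2^level can only start at a multiple of that size. The helper */
/** names (buddy_of, split_block, merge_block) are hypothetical and are not     */
/** used elsewhere in this module; the block is excluded from compilation.      */
#if 0
static int buddy_of(int offset,int level)
{
  /* two buddies at a given level differ only in bit 'level' of their offset */
  return offset^(1<<level);
}
static int split_block(int offset,int level)
{
  /* splitting the block at 'offset' of size 2^level yields two buddies of   */
  /* size 2^(level-1): one at 'offset' (the new candidate) and one at the    */
  /* returned offset, which is left in the free pool                         */
  return buddy_of(offset,level-1);
}
static int merge_block(int offset,int level)
{
  /* when a block and its (free, unsplit) buddy merge, the resulting block   */
  /* one level up starts at the lower of the two offsets                     */
  int buddy=buddy_of(offset,level);
  return (offset<buddy) ? offset : buddy;
}
#endif
/***/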
/** The storage allocator also incorporates an arena system. An arena groups    */
/** a number of allocated memory areas into a single entity so that they may    */
/** be freed by a single call. After a call to enter an arena is made, all      */
/** subsequent allocation requests are remembered and may all be freed by       */
/** an atomic call to leave the arena. Any level of arena nesting is allowed.   */
/** This mechanism allows related information to be stored in a single arena    */
/** so that individual lists of allocated areas do not have to be kept in order */
/** to free them after their lifetime ends. Allocation requests may also be     */
/** made with a flag meaning that they are exempt from the arena system. Space  */
/** for these requests is allocated in the root arena.                          */
/***/
/***/
/** Number of bytes of red tape for a non-buddy memory chunk */
#define NORMALCHUNKSTART 16
/***/
/** Number of bytes of red tape for a buddy memory chunk */
#define BUDDYCHUNKSTART 304
/***/
/** Maximum size for a single request that may be handled by the buddy system */
#define BUDDYMAXALLOC (131072)
/***/
/** Power of 2 for the smallest block allocatable by the buddy system */
#define BUDDYMINPOWER 8
/***/
/** Power of 2 for the largest block allocatable by the buddy system */
#define BUDDYMAXPOWER 17
/***/
/** Number of words - 1 in each pyramidal bit vector */
#define BUDDYARRAYTOP 35
/***/
/** Total space grabbed for a buddy memory chunk */
#define BUDDYCHUNKSIZE (BUDDYCHUNKSTART+BUDDYMAXALLOC)
/***/
/** The space required for an arena information descriptor */
#define ARENAINFOSIZE 16
/***/
/** The word offsets in the pyramidal bit vector corresponding to each level */
static const int LevelStarts [BUDDYMAXPOWER-(BUDDYMINPOWER)+1] =
  { 0, 16,24,28,30,31,32,33, 34,35};
/***/
/** The number of words of bit vector in each level */
static const int LevelLengths [BUDDYMAXPOWER-(BUDDYMINPOWER)+1] =
  { 16, 8,4,2,1,1,1,1, 1,1};
/***/
/** Format for arena information record - there is one of these for each */
/** active arena. The root arena is always active. */
struct arenainfo{
  int headchunk;                /* head of the chunks in the arena */
  int tailchunk;                /* tail of the chunks in the arena */
  int childarena;               /* any child arena information - 0 for */
                                /* the current arena */
  int parentarena;              /* any parent arena information - 0 for */
};                              /* the root arena */
/***/
/** Format for the red tape area of each memory chunk. The free and */
/** alloc arrays are only present in chunks containing buddy blocks. */
struct memchunk{
  int prevchunk;                /* previous memory chunk */
  int nextchunk;                /* next memory chunk */
  int size;                     /* size of chunk in bytes */
  int unused;                   /* unused chunk word */
  int free [BUDDYARRAYTOP+1];   /* pyramid vector for free */
  int alloc [BUDDYARRAYTOP+1];  /* pyramid vector for alloc */
};
/***/
/***/
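/** The sketch below is illustrative only: it spells out the arithmetic that    */
/** ReserveSpace and FreeSpace below use to find the state bit of a block. A    */
/** block of size 2^level starting at byte offset 'offset' within a chunk is    */
/** block number offset>>level at that level; its bit lives in word 'index'     */
/** under mask 'mask' of the free and alloc arrays. The helper name pyramid_bit */
/** is hypothetical; the block is excluded from compilation.                    */
#if 0
static void pyramid_bit(int offset,int level,int *index,int *mask)
{
  int block=(unsigned)offset>>level;                   /* block number at this level */
  *index=((unsigned)block>>5)+LevelStarts [level-(BUDDYMINPOWER)];
  *mask=1<<(block&31);                                 /* bit within that state word */
}
/* e.g. offset 0x300 at level BUDDYMINPOWER is block 3: word LevelStarts [0], mask 1<<3 */
#endif
/***/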
static void BuddyStateTrace()
{
/*******************************************************************************/
/** Generates tracing information giving the state of each memory chunk and  **/
/** the pyramidal bit vectors in each buddy chunk.                            **/
/*******************************************************************************/
  struct arenainfo *arena;
  struct memchunk *chunk;
  int nextarena,nextchunk,start,temp,i,j,k;
  printf("\nBUDDY DYNAMIC MEMORY STATE:\n\n");
  /* go through all the memory chunks and print info about each one */
  nextarena=CurArena;
  while (nextarena!=0)
  {
    arena=(struct arenainfo*)(nextarena);
    printf("ARENA=%8x\n\n",nextarena);
    nextchunk=arena->headchunk;
    while (nextchunk!=0)
    {
      /* print the address and size of the memory chunk */
      chunk=(struct memchunk*)(nextchunk);
      printf(" CHUNK: Address=%8x Size=%d\n",nextchunk,chunk->size);
      /* if the chunk is in the buddy system then print the state */
      /* of its free and allocated blocks */
      if (chunk->size==BUDDYMAXALLOC)
      {
        for (i=BUDDYMINPOWER; i<=BUDDYMAXPOWER; i++)
        {
          /* print the state of the free blocks at the current level */
          printf(" F%2d:",i);
          start=LevelStarts [i-(BUDDYMINPOWER)];
          for (j=start; j<=(start+LevelLengths [i-(BUDDYMINPOWER)])-1; j++)
          {
            /* reverse the state word to increase the readability */
            /* of the tracing */
            temp=0;
            for (k=0; k<=31; k++)
            {
              if ((chunk->free [j]&(1<<k))!=0) temp=temp|(1<<(31-k));
            }
            printf(" %8x",temp);
          }
          printf("\n");
          /* print the state of the allocated blocks at the current level */
          printf(" A%2d:",i);
          for (j=start; j<=(start+LevelLengths [i-(BUDDYMINPOWER)])-1; j++)
          {
            /* reverse the state word to increase the readability */
            /* of the tracing */
            temp=0;
            for (k=0; k<=31; k++)
            {
              if ((chunk->alloc [j]&(1<<k))!=0) temp=temp|(1<<(31-k));
            }
            printf(" %8x",temp);
          }
          printf("\n");
        }
      }
      nextchunk=chunk->nextchunk;
      printf("\n");
    }
    printf("\n");
    nextarena=arena->parentarena;
  }
} /*BuddyStateTrace*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
static int ReserveSpace(struct memchunk *chunk,int level)
{
/*******************************************************************************/
/** Reserves space in the given memory chunk. Level indicates the minimum    **/
/** level in the pyramid from which a block of sufficient size may be        **/
/** allocated. This block is of size 2^level. Returns the address of the     **/
/** block or zero if the chunk didn't contain enough free space.             **/
/*******************************************************************************/
  int start,vector,mask,startpos,index,i,j,k;
  /* go through all the ascending block sizes looking for a free block */
  for (i=level; i<=BUDDYMAXPOWER; i++)
  {
    /* go through all the free block bits associated with the current level */
    /* looking for an allocatable block */
    start=LevelStarts [i-(BUDDYMINPOWER)];
    for (j=start; j<=(start+LevelLengths [i-(BUDDYMINPOWER)])-1; j++)
    {
      if ((chunk->free [j]&(~chunk->alloc [j]))!=(0))
      {
        /* the current word contains a free block - determine if */
        /* it is in the first or second half of the word */
        vector=chunk->free [j]&(~chunk->alloc [j]);
        if ((vector&0xFFFF)==0)
        {
          startpos=((j-start)<<5)+16;
          mask=1<<16;
        }
        else
        {
          startpos=(j-start)<<5;
          mask=1;
        }
        /* get the actual position of the free block - in terms of */
        /* a byte offset from the start of the memory chunk */
        while ((vector&mask)==0) { startpos++; mask=mask<<1; }
        startpos=startpos<<i;
        /* split the located block into buddies down to the requested */
        /* level, leaving the unused halves marked as free */
        for (k=i; k>=level+1; k--)
        {
          index=((unsigned)startpos>>(k+5))+LevelStarts [k-(BUDDYMINPOWER)];
          mask=1<<(((unsigned)startpos>>k)&31);
          chunk->free [index]=chunk->free [index]&(~mask);
          index=((unsigned)startpos>>(k+4))+LevelStarts [k-1-(BUDDYMINPOWER)];
          mask=3<<(((unsigned)startpos>>(k-1))&30);
          chunk->free [index]=chunk->free [index]|mask;
        }
        /* mark the block being returned as allocated */
        index=((unsigned)startpos>>(level+5))+LevelStarts [level-(BUDDYMINPOWER)];
        mask=1<<(((unsigned)startpos>>level)&31);
        chunk->free [index]=chunk->free [index]&(~mask);
        chunk->alloc [index]=chunk->alloc [index]|mask;
        /* return the address of the newly allocated block */
        return ((int)chunk+BUDDYCHUNKSTART)+startpos;
      }
    }
  }
  /* if the chunk was full then return a NULL */
  return 0;
} /*ReserveSpace*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
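/***/
/** The sketch below is illustrative only: a minimal model of the request-size */
/** handling performed by B Malloc below, assuming that requests are rounded   */
/** up to the buddy granularity and then served from the smallest level whose  */
/** blocks are at least that big. The helper name request_level is             */
/** hypothetical; the block is excluded from compilation.                      */
#if 0
static int request_level(int size)
{
  int level,rest;
  if (size==0) size=1;
  /* round the request up to a multiple of the minimum buddy block size */
  size=((size+(1<<BUDDYMINPOWER))-1)&(~((1<<BUDDYMINPOWER)-1));
  /* smallest level with 2^level >= size, bounded below by BUDDYMINPOWER */
  level=BUDDYMINPOWER;
  rest=(size-1)>>BUDDYMINPOWER;
  while (rest!=0) { level++; rest=rest>>1; }
  return level;
}
#endif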
/**/
/***/
int BMalloc(int size,int exempt,int areaid)
{
/*******************************************************************************/
/** Grabs an area of memory size bytes long and returns its start address.   **/
/** Areaid is an identifier for the area for which space is being claimed and**/
/** it is used as a diagnostic. Exempt is non-zero if the area should be     **/
/** allocated in the root arena, otherwise the area is allocated in the      **/
/** current arena and is freed by the next LeaveArena call. Uses a buddy     **/
/** allocator to obtain space from a pool of larger chunks allocated from    **/
/** the system malloc. All returned areas are 8-byte aligned.                **/
/*******************************************************************************/
  struct arenainfo *arena;
  struct memchunk *curchunk,*newchunk;
  char area [21] ;
  int nextchunk,startlevel,mask,spacead,origsize,ad,unknown,i;
  int nextarena;
  /* find the name of the area if necessary */
  if ((TraceEnab!=0)&&(BlkTrace!=0))
  {
    unknown=1;
    if ((areaid>=0)&&(areaid<=MAMAXNAMEDAREA))
    {
      strcpy(area,MemoryAreaNames [areaid]);
      unknown=0;
    }
  }
  /* get the arena in which the area should be allocated */
  if (exempt==0)
  {
    arena=(struct arenainfo*)(CurArena);
  }
  else
  {
    nextarena=CurArena;
    do
    {
      arena=(struct arenainfo*)(nextarena);
      nextarena=arena->parentarena;
    } while (!(nextarena==0)) ;
  }
  /* handle the area allocation */
  origsize=size;
  if (size>BUDDYMAXALLOC)
  {
    /* if we are requesting a memory area of greater size than that */
    /* which can be handled by the buddy system then just malloc space */
    /* for the block directly */
    ad=ARCHMalloc(size+NORMALCHUNKSTART);
    if (ad==0) BError("No space for memory chunk");
    /* print tracing information for the new memory chunk */
    if ((TraceEnab!=0)&&(BlkTrace!=0))
    {
      printf("\n\n",ad);
    }
    /* fill the newly allocated block with the empty space marker */
    /* if necessary */
#if(FILLMALLOCSPACE!=0)
    for (i=0; i<=((size+NORMALCHUNKSTART)-4); i+=4) { (*(int *)(ad+i))=EMPTYSPACEMARKER; }
#endif
    /* add the newly allocated chunk to the tail of the memory chunk list */
    newchunk=(struct memchunk*)(ad);
    if (arena->tailchunk==0)
    {
      arena->headchunk=ad;
      arena->tailchunk=ad;
      newchunk->prevchunk=0;
      newchunk->nextchunk=0;
    }
    else
    {
      curchunk=(struct memchunk*)(arena->tailchunk);
      curchunk->nextchunk=ad;
      newchunk->prevchunk=arena->tailchunk;
      newchunk->nextchunk=0;
      arena->tailchunk=ad;
    }
    newchunk->size=size;
    spacead=ad+NORMALCHUNKSTART;
  }
  else
  {
    /* allocate some space even if an area of size 0 is requested */
    if (size==0) size=1;
    /* the actual size obtained must be a multiple of the */
    /* minimum size handled by the buddy system */
    size=((size+(1<<BUDDYMINPOWER))-1)&(~((1<<BUDDYMINPOWER)-1));
    /* find the smallest pyramid level whose blocks can hold the request */
    startlevel=BUDDYMINPOWER;
    mask=(size-1)>>BUDDYMINPOWER;
    while (mask!=0) { startlevel++; mask=mask>>1; }
    /* go through all the existing chunks looking for one with */
    /* enough free space to accommodate the allocation */
    nextchunk=arena->headchunk;
    while (nextchunk!=0)
    {
      curchunk=(struct memchunk*)(nextchunk);
      if (curchunk->size==BUDDYMAXALLOC)
      {
        spacead=ReserveSpace(curchunk,startlevel);
        if (spacead!=0) break ;
      }
      nextchunk=curchunk->nextchunk;
    }
    /* if all the blocks are so full that the allocation cannot be */
    /* made then malloc a new memory chunk */
    if (nextchunk==0)
    {
      ad=ARCHMalloc(BUDDYCHUNKSIZE);
      if (ad==0) BError("No space for buddy memory chunk");
      /* print tracing information for the new memory chunk */
      if ((TraceEnab!=0)&&(BlkTrace!=0))
      {
        printf("\n",ad);
      }
      /* fill the newly allocated chunk with the empty space marker */
      /* if necessary */
#if(FILLMALLOCSPACE!=0)
      for (i=0; i<=BUDDYCHUNKSIZE-4; i+=4) { (*(int *)(ad+i))=EMPTYSPACEMARKER; }
#endif
      /* add the new chunk to the head of the arena chunk chain */
      newchunk=(struct memchunk*)(ad);
      if (arena->headchunk==0)
      {
        arena->headchunk=ad;
        arena->tailchunk=ad;
        newchunk->prevchunk=0;
        newchunk->nextchunk=0;
      }
      else
      {
        curchunk=(struct memchunk*)(arena->headchunk);
        curchunk->prevchunk=ad;
        newchunk->prevchunk=0;
        newchunk->nextchunk=arena->headchunk;
        arena->headchunk=ad;
      }
      /* clear the free and allocated state vectors in the new chunk */
      for (i=0; i<=BUDDYARRAYTOP; i++) { newchunk->free [i]=0; newchunk->alloc [i]=0; }
      newchunk->size=BUDDYMAXALLOC;
      /* mark the new chunk as containing a single free block of the */
      /* maximal size and then allocate the required space from it */
      newchunk->free [BUDDYARRAYTOP]=1;
      spacead=ReserveSpace(newchunk,startlevel);
    }
  }
  /* generate tracing if it is enabled */
  if ((TraceEnab!=0)&&(BlkTrace!=0))
  {
    printf(": %8x",origsize);
    if (exempt!=0)
    {
      if (unknown!=0)
      {
        printf(" arena exempt bytes for %d = ",areaid);
      }
      else
      {
        printf(" arena exempt bytes for %s = ",area);
      }
    }
    else
    {
      if (unknown!=0)
      {
        printf(" arena bytes for area %d = ",areaid);
      }
      else
      {
        printf(" arena bytes for %s = ",area);
      }
    }
    printf("%8x\n",spacead);
    if (BuddyTrace!=0) BuddyStateTrace();
  }
  /* return the address of the allocated space */
  return spacead;
} /*B Malloc*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
static int FreeSpace(struct memchunk *chunk,int offset)
{
/*******************************************************************************/
/** Frees the space with the given byte offset in the given chunk. Returns   **/
/** a non-zero result if this offset does not correspond to allocated space. **/
/*******************************************************************************/
  int level,index,mask,i,j;
  if (offset==0)
  {
    /* if the block starts at the base of the chunk then it may be */
    /* of any size */
    level=BUDDYMAXPOWER;
  }
  else
  {
    /* determine the maximum size of the block from its offset from the */
    /* start of the chunk */
    level=BUDDYMINPOWER;
    mask=1<<BUDDYMINPOWER;
    while ((offset&mask)==0) { level++; mask=mask<<1; }
  }
  /* go down through the levels looking for the allocated block */
  for (i=level; i>=BUDDYMINPOWER; i--)
  {
    index=((unsigned)offset>>(i+5))+LevelStarts [i-(BUDDYMINPOWER)];
    mask=1<<(((unsigned)offset>>i)&31);
    if ((chunk->alloc [index]&mask)!=0)
    {
      /* we have found the block so now mark it as free again */
      chunk->alloc [index]=chunk->alloc [index]&(~mask);
      chunk->free [index]=chunk->free [index]|mask;
      /* go through all the ascending levels checking to see */
      /* if the buddy of the newly freed block is also free and, */
      /* if so, coalescing them to give a larger free block */
      for (j=i; j<=BUDDYMAXPOWER-1; j++)
      {
        index=((unsigned)offset>>(j+5))+LevelStarts [j-(BUDDYMINPOWER)];
        mask=3<<(((unsigned)offset>>j)&30);
        if ((chunk->free [index]&mask)!=mask) return 0;
        chunk->free [index]=chunk->free [index]&(~mask);
        index=((unsigned)offset>>(j+6))+LevelStarts [j+1-(BUDDYMINPOWER)];
        mask=1<<(((unsigned)offset>>(j+1))&31);
        chunk->free [index]=chunk->free [index]|mask;
      }
      return 0;
    }
  }
  /* if we didn't find the allocated block then it can't have */
  /* been allocated */
  return 1;
} /*FreeSpace*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
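/** The sketch below is illustrative only: it shows the intended calling       */
/** pattern for the arena system described earlier, combining B Malloc, B Free,*/
/** B EnterArena and B LeaveArena. The sizes and the area identifier 0 are     */
/** arbitrary examples and the helper name arena_usage_sketch is hypothetical; */
/** the block is excluded from compilation.                                    */
#if 0
static void arena_usage_sketch(void)
{
  int kept,scratch;
  kept=BMalloc(256,1,0);        /* exempt: allocated in the root arena        */
  BEnterArena();                /* open a nested arena                        */
  scratch=BMalloc(1024,0,0);    /* claimed inside the nested arena            */
  /* ... use the scratch area ... */
  BLeaveArena();                /* frees every non-exempt area in the arena   */
  BFree(kept);                  /* exempt areas must be freed individually    */
  (void)scratch;
}
#endif
/***/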
void BFree(int ad)
{
/*******************************************************************************/
/** Frees an area of memory previously allocated by B Malloc. Ad is the start**/
/** address of the area.                                                      **/
/*******************************************************************************/
  struct arenainfo *arena;
  struct memchunk *curchunk,*tempchunk;
  int nextchunk,offset,error,freearea,nextarena,areafound;
  /* look through the list of memory chunks to determine which */
  /* one the memory area belongs to */
  areafound=0;
  freearea=0;
  nextarena=CurArena;
  while (nextarena!=0)
  {
    arena=(struct arenainfo*)(nextarena);
    nextchunk=arena->headchunk;
    while (nextchunk!=0)
    {
      curchunk=(struct memchunk*)(nextchunk);
      if (curchunk->size==BUDDYMAXALLOC)
      {
        if (((nextchunk+BUDDYCHUNKSTART)<=ad)&&(ad<((nextchunk+BUDDYCHUNKSTART)+BUDDYMAXALLOC)))
        {
          /* the area is inside the current buddy chunk so free it */
          offset=(ad-nextchunk)-BUDDYCHUNKSTART;
          if ((offset&((1<<BUDDYMINPOWER)-1))!=0) BError("B Free: Area has not been allocated");
          error=FreeSpace(curchunk,offset);
          if (error!=0) BError("B Free: Area has not been allocated");
          /* if the whole chunk is now free then it may be released */
          if ((curchunk->free [BUDDYARRAYTOP]&1)!=0)
          {
            freearea=1;
          }
          areafound=1;
          break ;
        }
      }
      else if ((nextchunk+NORMALCHUNKSTART)==ad)
      {
        /* the area corresponds to a directly malloced block so just */
        /* free the entire block */
        areafound=1;
        freearea=1;
        break ;
      }
      nextchunk=curchunk->nextchunk;
    }
    if (areafound!=0) break ;
    nextarena=arena->parentarena;
  }
  /* if the area wasn't found then it can't have been actually allocated */
  if (areafound==0) BError("B Free: Area has not been allocated");
  /* if the chunk is no longer used then it is freed */
  if (freearea!=0)
  {
    /* the chunk is removed from the chunk chain */
    if (curchunk->prevchunk!=0)
    {
      tempchunk=(struct memchunk*)(curchunk->prevchunk);
      tempchunk->nextchunk=curchunk->nextchunk;
    }
    else
    {
      arena->headchunk=curchunk->nextchunk;
    }
    if (curchunk->nextchunk!=0)
    {
      tempchunk=(struct memchunk*)(curchunk->nextchunk);
      tempchunk->prevchunk=curchunk->prevchunk;
    }
    else
    {
      arena->tailchunk=curchunk->prevchunk;
    }
    /* fill the freed space with a known pattern if required */
#if(FILLMALLOCSPACE!=0)
    {
      int i,size;
      size=curchunk->size;
      if (size==BUDDYMAXALLOC) { size+=BUDDYCHUNKSTART; } else { size+=NORMALCHUNKSTART; }
      for (i=0; i<=size-4; i+=4) { (*(int *)(nextchunk+i))=EMPTYSPACEMARKER; }
    }
#endif
    /* actually free the area */
    ARCHFree(nextchunk);
    /* print tracing if required */
    if ((TraceEnab!=0)&&(BlkTrace!=0))
    {
      printf("\n",nextchunk);
    }
  }
  /* generate tracing if enabled */
  if ((TraceEnab!=0)&&(BlkTrace!=0))
  {
    printf(": %8x\n",ad);
    if (BuddyTrace!=0) BuddyStateTrace();
  }
} /*B Free*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
void BEnterArena()
{
/*******************************************************************************/
/** Enters a new arena for subsequent B Malloc calls. Such calls which are   **/
/** not marked as exempt from the arena system will allocate space within    **/
/** the current arena. When a call is made to B LeaveArena all the areas     **/
/** allocated within the arena will be freed. Arenas may be nested to any    **/
/** depth but there is a significant space penalty for each call.            **/
/*******************************************************************************/
  struct arenainfo *arena;
  int ad;
  /* grab some space for the arena information */
  ad=ARCHMalloc(ARENAINFOSIZE);
  if (ad==0) BError("No space for new arena information");
  /* set up the arena information */
  arena=(struct arenainfo*)(ad);
  arena->headchunk=0;
  arena->tailchunk=0;
  arena->childarena=0;
  arena->parentarena=CurArena;
  CurArena=ad;
  /* generate tracing if enabled */
  if ((TraceEnab!=0)&&(BlkTrace!=0))
  {
    printf(": %8x\n",ad);
    if (BuddyTrace!=0) BuddyStateTrace();
  }
} /*B EnterArena*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable
All Rights Reserved.*/ /**/ /***/ void BLeaveArena() { /*******************************************************************************/ /** Leaves the current arena. A previous call to B EnterArena must have been **/ /** made. All memory areas allocated within the arena are freed. **/ /*******************************************************************************/ struct arenainfo *arena; struct memchunk *chunk; int nextchunk; /* check we are actually in the context of an arena */ arena=(struct arenainfo*)(CurArena); if (arena->parentarena==0) { BError("B LeaveArena: Cannot leave root arena"); } /* go through all the chunks in the arena and free them all */ nextchunk=arena->headchunk; while (nextchunk!=0) { chunk=(struct memchunk*)(nextchunk); nextchunk=chunk->nextchunk; #if(FILLMALLOCSPACE!=0) { int i,size; size=chunk->size; if (size==BUDDYMAXALLOC) { size+=BUDDYCHUNKSTART; } else { size+=NORMALCHUNKSTART; } for (i=0; i<=size-4; i+=4) { (*(int *)((int)chunk+i))=EMPTYSPACEMARKER; } } #endif ARCHFree((int)chunk); if ((TraceEnab!=0)&&(BlkTrace!=0)) { printf("\n",(int)chunk); } } /* free the arena information */ CurArena=arena->parentarena; ARCHFree((int)arena); /* generate tracing if enabled */ if ((TraceEnab!=0)&&(BlkTrace!=0)) { printf(": %8x\n",(int)arena); if (BuddyTrace!=0) BuddyStateTrace(); } } /*B LeaveArena*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ /********************************************************************************/ /** LABEL RECORD MANAGEMENT **/ /********************************************************************************/ /***/ struct lrecfmt* BLabRecAd(int index) { /****************************************************************/ /** Provide the address of the label record with given index **/ /****************************************************************/ int I,J,K; I=(unsigned)index>>LRECBUCKETSHIFT; if (I>ublabrecheads) BError("labrecheads table overflow"); J=labrecheads [I]; if (J==0) { J=BMalloc(LRECBUCKETSIZE*LRECSIZE,1,MALABADADAREA); if (J==0) BError("No space for label records"); labrecheads [I]=J; for (K=0; K<=((LRECBUCKETSIZE*LRECSIZE)-4); K+=4) { (*(int *)(J+K))=0; } } return (struct lrecfmt*)(J+((index&(LRECBUCKETSIZE-1))<labnum; if (J!=0) { if (J==labnum) { if (mode==1) { if (lrec->frag!=NULL) { BError("B LocateLabel: Label redefinition"); } lrec->frag=PI->curfrag; } return I; } while (lrec->chain!=0) { I=lrec->chain; lrec=BLabRecAd(I); if (lrec->labnum==labnum) { if (mode==1) { if (lrec->frag!=NULL) { BError("B LocateLabel: Label redefinition"); } lrec->frag=PI->curfrag; } return I; } } lrec->chain=nextlab; I=nextlab; nextlab++; lrec=BLabRecAd(I); } lrec->labnum=labnum; lrec->frag=NULL; lrec->chain=0; lrec->freffrag=PI->curfrag; if (mode==1) lrec->frag=PI->curfrag; return I; } /*B LocateLabel*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
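All Rights Reserved.*/
/**/
/***/
/* B LabRecAd above resolves a label record index through a two level table:  */
/* the top bits select a lazily allocated bucket and the bottom bits select a */
/* slot inside it.  The sketch below is a standalone model of that lookup;    */
/* the sizes and the record type are invented and are not the module's LREC*  */
/* values.                                                                    */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define BUCKETSHIFT 6
#define BUCKETSIZE  (1<<BUCKETSHIFT)          /* 64 records per bucket        */
#define NBUCKETS    32

struct toyrec { int labnum; int defined; };

static struct toyrec *buckets [NBUCKETS];

/* return the address of the record with the given index, allocating and     */
/* clearing the owning bucket on first touch                                  */
static struct toyrec *toyrecad(int index)
{
  int b=(unsigned)index>>BUCKETSHIFT;
  if (b>=NBUCKETS) { fprintf(stderr,"record table overflow\n"); exit(1); }
  if (buckets [b]==NULL) {
    buckets [b]=calloc(BUCKETSIZE,sizeof(struct toyrec));
    if (buckets [b]==NULL) { fprintf(stderr,"no space for records\n"); exit(1); }
  }
  return &buckets [b][index&(BUCKETSIZE-1)];
}

int main(void)
{
  struct toyrec *r=toyrecad(200);       /* bucket 3, slot 8, made on demand   */
  r->labnum=200; r->defined=1;
  printf("record 200 lives in bucket %d\n",200>>BUCKETSHIFT);
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.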
All Rights Reserved.*/ /**/ /***/ /***/ /********************************************************************************/ /** INSTRUCTION RECORD MANAGEMENT **/ /********************************************************************************/ /***/ static void createfreeinstrs() { /****************************************************************/ /** Create an additional block of free instruction records **/ /****************************************************************/ struct instrfmt *in,*free; int ad,i,alloc; ad=BMalloc(INSTRALLOCSIZE*INSTRFMTSIZE,1,MAINSTRS); if (ad==0) BError("No space for instruction records"); free=freeinstr [PI->proclevel]; alloc=instralloc [PI->proclevel]; for (i=0; i<=INSTRALLOCSIZE-1; i++) { in=(struct instrfmt*)(ad); in->previnstr=(struct instrfmt *)FREEINSTRMARKER; in->nextinstr=free; in->allocchain=alloc; free=in; alloc=ad; ad+=INSTRFMTSIZE; } freeinstr [PI->proclevel]=free; instralloc [PI->proclevel]=alloc; if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": free chain starts at %8x\n",(unsigned)freeinstr [PI->proclevel]); } } /*create free instrs*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static struct instrfmt *allocateinstr() { /****************************************************************/ /** Return the address of a free instruction record, **/ /** claiming more space if necessary. **/ /****************************************************************/ struct instrfmt *in; int ad,alloc; if (freeinstr [PI->proclevel]==NULL) { createfreeinstrs(); } in=freeinstr [PI->proclevel]; if (in->previnstr!=(struct instrfmt *)FREEINSTRMARKER) { BError("Bad instruction structure"); } freeinstr [PI->proclevel]=in->nextinstr; alloc=in->allocchain; memset(in,0,sizeof( struct instrfmt)); in->area=-1; in->u0.offset=-1; in->allocchain=alloc; if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": instr allocated at %8x\n",ad); } return in; } /*allocate instr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BCopyFrag(struct fragfmt *source,struct fragfmt *dest) { /****************************************************************/ /** duplicates the contents of the source fragment into the **/ /** destination fragment maintaining the fragment links of the **/ /** destination **/ /****************************************************************/ int prevlink,nextlink,alloc; prevlink=(int)dest->prevfrag; nextlink=(int)dest->nextfrag; alloc=dest->allocchain; *dest=*source; dest->final=dest; dest->allocchain=alloc; dest->nextfrag=(struct fragfmt *)nextlink; dest->prevfrag=(struct fragfmt *)prevlink; } /*B CopyFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BCopyInstr(struct instrfmt *source,struct instrfmt *dest) { /****************************************************************/ /** duplicates the contents of the source instruction into the **/ /** destination instruction maintaining the instructions links **/ /** of the destination **/ /****************************************************************/ struct instrfmt * prevlink,*nextlink; int alloc; prevlink=dest->previnstr; nextlink=dest->nextinstr; alloc=dest->allocchain; *dest=*source; dest->allocchain=alloc; dest->nextinstr=nextlink; dest->previnstr=prevlink; } /*B CopyInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
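All Rights Reserved.*/
/**/
/***/
/* createfreeinstrs and allocateinstr above carve instruction records out of  */
/* one block, thread them onto a free chain and stamp each free record with a */
/* marker so that a corrupt chain is caught at allocation time.  The          */
/* standalone sketch below models that scheme with invented names; the real   */
/* records also carry an allocchain so B FreeAllocation can rebuild the free  */
/* lists later.                                                               */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define NRECS 8

struct toyinstr { struct toyinstr *prev,*next; int payload; };

static struct toyinstr freesentinel;          /* its address marks free records */
#define FREEMARKER (&freesentinel)

static struct toyinstr *freelist=NULL;

/* carve one malloc'd block into NRECS records and push them all onto the    */
/* free chain, marking each as free with the sentinel                         */
static void carveblock(void)
{
  struct toyinstr *block=malloc(NRECS*sizeof(struct toyinstr));
  int i;
  if (block==NULL) { fprintf(stderr,"no space for records\n"); exit(1); }
  for (i=0; i<NRECS; i++) {
    block [i].prev=FREEMARKER;
    block [i].next=freelist;
    freelist=&block [i];
  }
}

/* pop a record, checking the sentinel to catch a corrupt chain              */
static struct toyinstr *getrec(void)
{
  struct toyinstr *r;
  if (freelist==NULL) carveblock();
  r=freelist;
  if (r->prev!=FREEMARKER) { fprintf(stderr,"bad record chain\n"); exit(1); }
  freelist=r->next;
  r->prev=NULL; r->next=NULL; r->payload=0;
  return r;
}

int main(void)
{
  struct toyinstr *a=getrec(),*b=getrec();
  printf("allocated records at %p and %p\n",(void *)a,(void *)b);
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.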
All Rights Reserved.*/ /**/ /***/ struct instrfmt *BObtainInstr() { /****************************************************************/ /** allocates a new instruction and leaves it unchained **/ /****************************************************************/ struct instrfmt *newinstr; newinstr=allocateinstr(); if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": instr at %8x\n",(unsigned)newinstr); } return newinstr; } /*B ObtainInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct instrfmt *BNewInstr() { /****************************************************************/ /** allocate a new instruction and chain **/ /****************************************************************/ struct fragfmt *frag; struct instrfmt *last,*instr; /* ensure that fragments do not become too long but do not break */ /* fragments on a delay slot owner and delay slot instruction */ /* boundary. RS6 calls have a sort of DS instr also so skip calls*/ if (PI->fragsize>MAXFRAGMENTSIZE) { frag=(struct fragfmt*)(PI->curfrag); last=(struct instrfmt*)(frag->lastinstr); if ((frag->lastinstr!=0)&&((last->props&(DELAYSLOTEXISTS|ANYCALL))==0)) BBreakFrag(0); } ValidLive=NOLIVENESSINFO; instr=allocateinstr(); frag=(struct fragfmt*)(PI->curfrag); if (frag->firstinstr!=0) { last=(struct instrfmt*)(frag->lastinstr); last->nextinstr=instr; instr->previnstr=frag->lastinstr; /*first in this fragment*/ } else { frag->firstinstr=instr; } PI->fragsize=PI->fragsize+1; frag->lastinstr=instr; if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": instr at %8x chained back to %8x\n",(unsigned)instr,(unsigned)instr->previnstr); } return instr; } /*B NewInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void UpdatePredLists(struct fragfmt *frag) { /****************************************************************/ /** Updates the predecessor lists appropriately given that the **/ /** terminating jump in the given fragment has been deleted. 
**/ /****************************************************************/ struct fragfmt *curfrag; struct lrecfmt *lrec; struct preddescfmt *desc,*tempdesc; int nextdesc,lastdesc,nextfrag; if ((frag->props&DSWITCHJUMPFRAG)!=0) { /* a dense switch jump is being deleted so modify all */ /* the predecessor lists of dense case fragments which */ /* have this fragment as a predecessor */ nextfrag=(int)PI->procfrag; while (nextfrag!=0) { curfrag=(struct fragfmt*)(nextfrag); if ((curfrag->props&DSWITCHCASEFRAG)!=0) { curfrag=(struct fragfmt*)(curfrag->final); lastdesc=0; nextdesc=curfrag->preds; while (nextdesc!=0) { desc=(struct preddescfmt*)(nextdesc); if (desc->frag==(int)frag) { if (lastdesc==0) { curfrag->preds=desc->next; } else { tempdesc=(struct preddescfmt*)(lastdesc); tempdesc->next=desc->next; } desc->frag=FREEPREDMARKER; desc->next=freepred [PI->proclevel]; freepred [PI->proclevel]=(int)desc; break ; } lastdesc=nextdesc; nextdesc=desc->next; } } nextfrag=(int)curfrag->nextfrag; } } else if ((frag->labref!=0)&&(frag->labrefinstr!=NULL)) { /* we have a fragment which ends in a branch so find the destination */ /* fragment and remove this fragment from its predecessor list */ lrec=BLabRecAd(frag->labref); curfrag=lrec->frag; if (curfrag->final!=0) { curfrag=(struct fragfmt*)(curfrag->final); lastdesc=0; nextdesc=curfrag->preds; while (nextdesc!=0) { desc=(struct preddescfmt*)(nextdesc); if (desc->frag==(int)frag) { if (lastdesc==0) { curfrag->preds=desc->next; } else { tempdesc=(struct preddescfmt*)(lastdesc); tempdesc->next=desc->next; } desc->frag=FREEPREDMARKER; desc->next=freepred [PI->proclevel]; freepred [PI->proclevel]=(int)desc; break ; } lastdesc=nextdesc; nextdesc=desc->next; } } }; } /*UpdatePredLists*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BDeleteInstr(struct instrfmt *instr,int physicaldelete) { /****************************************************************/ /** Release an instruction record. This routine does the **/ /** necessary housekeeping to maintain the fragment **/ /** predecessor lists. If physicaldelete is non-zero then the **/ /** instruction is actually removed from the fragment and **/ /** added to the free instruction pool, otherwise it is merely **/ /** marked as deleted. 
**/ /****************************************************************/ struct fragfmt *frag; struct instrfmt *in,*prev,*next; in=instr; if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": instr at %8x unchained from prev at %8x and next at %8x\n", (unsigned)instr,(unsigned)in->previnstr,(unsigned)in->nextinstr); } frag=NULL; if (physicaldelete!=0) { if (in->previnstr!=NULL) { prev=in->previnstr; prev->nextinstr=in->nextinstr; /* deleting first instr of a frag */ } else { frag=PI->procfrag; while (frag!=NULL) { if (frag->firstinstr==instr) { frag->firstinstr=in->nextinstr; if (frag==PI->curfrag && PI->fragsize>0) PI->fragsize--; break ; } frag=frag->nextfrag; } } if (in->nextinstr!=NULL) { next=in->nextinstr; next->previnstr=in->previnstr; /* deleting last instr of a frag */ } else if (frag==NULL) { frag=PI->procfrag; while (frag!=NULL) { if (frag->lastinstr==instr) { frag->lastinstr=in->previnstr; if (frag==PI->curfrag && PI->fragsize>0) PI->fragsize--; break ; } frag=frag->nextfrag; } } else { frag->lastinstr=in->previnstr; if (frag->firstinstr==NULL) PI->fragsize=0; }; } if (in->props&(BRANCH|JUMPINSTR)) { if (frag==NULL) { frag=PI->procfrag; while (frag!=NULL) { if (frag->labref!=0 && frag->labrefinstr==instr) break ; frag=frag->nextfrag; } } /* remove the fragment from any predecessor list of the */ /* destination fragment */ if (frag && ValidPreds) { if ((in->privprops&DSWITCHJUMPINSTR) || (frag->labref!=0 && frag->labrefinstr==instr)) { UpdatePredLists(frag); frag->labref=0; frag->labrefinstr=NULL; } } } /* add the deleted instruction to the list of free instructions */ /* if we are to physically delete it, otherwise we just mark it */ /* as deleted */ if (physicaldelete!=0) { in->previnstr=(struct instrfmt *)FREEINSTRMARKER; in->nextinstr=freeinstr [PI->proclevel]; freeinstr [PI->proclevel]=instr; } else { in->group=DELETED; } } /*B DeleteInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct instrfmt *BInsertInstr(struct fragfmt *frag,struct instrfmt *nextinstr) { /*************************************************************************/ /** Allocate a new instr record and insert it before nextinstr in the **/ /** instruction chain of the given fragment. If nextinstr is 0 then **/ /** the instruction is inserted at the end of the fragment. **/ /** Returns address of inserted instruction record. **/ /*************************************************************************/ struct instrfmt *in,*prev; in=allocateinstr(); if (nextinstr==NULL) { /* handle insertion at the end of the fragment */ in->previnstr=frag->lastinstr; in->nextinstr=NULL; if (frag->lastinstr!=NULL) { prev=frag->lastinstr; prev->nextinstr=in; } else { frag->firstinstr=in; } frag->lastinstr=in; } else { /* handle insertion inside the fragment */ in->nextinstr=nextinstr; in->previnstr=nextinstr->previnstr; if (nextinstr->previnstr!=NULL) { prev=nextinstr->previnstr; prev->nextinstr=in; } else { frag->firstinstr=in; } nextinstr->previnstr=in; } /* generate tracing if required */ if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": instr %8x inserted before instr %8x:%8x\n", (unsigned)in,(unsigned)frag,(unsigned)nextinstr); } return in; } /*B InsertInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ static void MakeInstrSkeletal(struct instrfmt *instr) { /*************************************************************************/ /** Gets rid of all information in instruction except that required for **/ /** actual code generation. This is done during instruction commitment. **/ /** It reduces the chances of other parts of the code generator using **/ /** invalid instruction information. **/ /*************************************************************************/ instr->props&=~(DELAYSLOTEXISTS|DELAYSLOTUNUSED|DELAYSLOTINSTR|ANNULLED|ANNULLEDTAKEN); instr->rref=0; instr->rdef=0; #if(ISERFREQUIRED!=0) instr->fref=0; instr->fdef=0; #if(ISERFREQUIRED==2) instr->dref=0; instr->ddef=0; #endif #if(ISCRFREQUIRED!=0) instr->cref=0; instr->cdef=0; #endif; #endif; } /*MakeInstrSkeletal*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static struct instrfmt *GetLineMarker(struct instrfmt *instr) { /********************************************************************************/ /** Returns a pointer to the line number psuedo instruction preceeding the **/ /** given executable instruction or 0 if no line marker could be found or **/ /** there was another executable instruction between it and the line marker **/ /********************************************************************************/ struct instrfmt *curinstr; /* search through instructions before the given one looking for */ /* line number pseudo instruction */ curinstr=instr->previnstr; while (curinstr!=NULL) { if (curinstr->groupgroup==LINENUMBER) return curinstr; curinstr=curinstr->previnstr; } return 0; } /*GetLineMarker*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BCommitInstr(struct instrfmt *instr,int type) { /*************************************************************************/ /** Appends the instr onto the end of the chain of instructions in the **/ /** given fragment. If type <0 then the instruction is a branch **/ /** terminator for the fragment. Such an instruction has its **/ /** destination in its immediate field. If type >0 then the instruction **/ /** should not be made skeletal. If type is 0 then the instruction is **/ /** an ordinary one. 
**/ /*************************************************************************/ struct fragfmt *frag; struct instrfmt *linemark,*tempinstr; /* ensure we have a valid context for commiting an instruction */ if ((Commiting==0)||((CommitProps&RPGISRESTRICTED)!=0)) { BError("B CommitInstr: Not in unrestricted commit phase"); } if (LastCommitFrag==0) { BError("B CommitInstr: No previously committed fragment"); } /* ensure that an instruction is not committed twice */ if ((instr->bubbles!=0)&&((instr->previnstr!=0)||(instr->nextinstr!=0))) { BError("B CommitInstr: Illegal commitment"); } instr->bubbles=1; if ((CommitProps&RPBBSCHEDULED)==0) { instr->issueprops=instr->issueprops|TPGLOBSCHED; } /* issue a line marker pseudo instruction if possible */ frag=(struct fragfmt*)(LastCommitFrag); linemark=GetLineMarker(instr); if (linemark!=NULL) { if (linemark->previnstr!=0) { tempinstr=linemark->previnstr; tempinstr->nextinstr=0; } if (linemark->nextinstr!=0) { tempinstr=linemark->nextinstr; tempinstr->previnstr=0; } if (frag->lastinstr==0) { linemark->previnstr=0; linemark->nextinstr=0; frag->firstinstr=linemark; frag->lastinstr=linemark; } else { tempinstr=frag->lastinstr; tempinstr->nextinstr=linemark; linemark->previnstr=frag->lastinstr; linemark->nextinstr=0; frag->lastinstr=linemark; } } /* break the instruction chain before any instruction to be committed */ if (instr->previnstr!=0) { tempinstr=instr->previnstr; tempinstr->nextinstr=0; } /* break the instruction chain after any instruction to be committed */ if (instr->nextinstr!=0) { tempinstr=instr->nextinstr; tempinstr->previnstr=0; } /* append the instruction onto the end of the last committed */ /* fragment's instruction chain */ if (frag->lastinstr==0) { instr->previnstr=0; instr->nextinstr=0; frag->firstinstr=instr; frag->lastinstr=instr; } else { tempinstr=frag->lastinstr; tempinstr->nextinstr=instr; instr->previnstr=frag->lastinstr; instr->nextinstr=0; frag->lastinstr=instr; } /* if the instruction is a branch terminator then update the label */ /* reference information */ if (type<0) { frag->labref=instr->u1.immval; frag->labrefinstr=instr; } /* clear invalid information from the instruction */ if (type<=0) MakeInstrSkeletal(instr); } /*B CommitInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BCurInstrAddr() { /**************************************************************************/ /** Returns address of the current (ie last-created) instr taking frag **/ /** breaks into account. **/ /** There must be a current instr when this function is called. **/ /**************************************************************************/ struct fragfmt *f; int i; f=PI->curfrag; i=(int)f->lastinstr; while (i==0) { f=f->prevfrag; i=(int)f->lastinstr; } return i; } /*B CurInstrAddr*/ /***/ struct instrfmt *BCurInstr() { /**************************************************************************/ /** Returns pointer to the current (ie last-created) instr taking frag **/ /** breaks into account. **/ /** There must be a current instr when this function is called. **/ /**************************************************************************/ struct fragfmt *f; struct instrfmt *instr; f=PI->curfrag; instr=f->lastinstr; while (instr==NULL) { f=f->prevfrag; instr=f->lastinstr; } return instr; } /*B CurInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ int BCurFragAddr() { /**************************************************************************/ /** Returns the address of the current fragment **/ /**************************************************************************/ return (int)PI->curfrag; } /*B CurFragAddr*/ /***/ struct fragfmt *BCurFrag() { /**************************************************************************/ /** Returns a pointer to the current fragment **/ /**************************************************************************/ return PI->curfrag; } /*B CurFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ /***/ /********************************************************************************/ /** CODE FRAGMENT MANAGEMENT **/ /********************************************************************************/ /***/ static void createfreefrags() { /****************************************************************/ /** Create an additional block of free fragment records **/ /****************************************************************/ struct fragfmt *in; int ad,i,free,alloc; ad=BMalloc(FRAGALLOCSIZE*FRAGFMTSIZE,1,MAFRAGS); if (ad==0) BError("No space for fragment records"); free=freefrag [PI->proclevel]; alloc=fragalloc [PI->proclevel]; for (i=0; i<=FRAGALLOCSIZE-1; i++) { in=(struct fragfmt*)(ad); in->prevfrag=(struct fragfmt *)FREEFRAGMARKER; in->nextfrag=(struct fragfmt *)free; in->allocchain=alloc; free=ad; alloc=ad; ad+=FRAGFMTSIZE; } freefrag [PI->proclevel]=free; fragalloc [PI->proclevel]=alloc; if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": free chain starts at %8x\n",freefrag [PI->proclevel]); } } /*create free frags*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BNewFrag() { /****************************************************************/ /** Create a new code fragment and chains it into the current **/ /** position in the fragment list **/ /****************************************************************/ struct fragfmt *frag,*prevfrag,*nextfrag; int ad,alloc; /* if the free fragment chain is exhausted then create some new entries */ if (freefrag [PI->proclevel]==0) { createfreefrags(); } /* link the new fragment into the fragment chain */ ValidPreds=0; ValidLive=NOLIVENESSINFO; ad=freefrag [PI->proclevel]; frag=(struct fragfmt*)(ad); if (frag->prevfrag!=(struct fragfmt *)FREEFRAGMARKER) { BError("B NewFrag: Bad fragment structure"); } freefrag [PI->proclevel]=(int)frag->nextfrag; alloc=frag->allocchain; memset(frag,0,sizeof( struct fragfmt)); frag->allocchain=alloc; frag->prevfrag=(struct fragfmt *)PI->curfrag; if (PI->curfrag!=0) { prevfrag=(struct fragfmt*)(PI->curfrag); if (prevfrag->nextfrag!=0) { nextfrag=prevfrag->nextfrag; nextfrag->prevfrag=(struct fragfmt *)ad; } frag->nextfrag=prevfrag->nextfrag; prevfrag->nextfrag=(struct fragfmt *)ad; } PI->curfrag=frag; PI->fragsize=0; /* print tracing information */ if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": new frag at %8x chained back to %8x\n",(unsigned)PI->curfrag,(unsigned)frag->prevfrag); } } /*B NewFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BUnchainFragments() { /*************************************************************************/ /** Unchains the fragments in the current procedure. Only sequences of **/ /** fragments with the same final fragment pointer remain chained **/ /** together. 
This is done prior to global scheduling - the fragment **/ /** structure is recreated by the schedule committment. **/ /*************************************************************************/ struct fragfmt *tempfrag,*lastfrag,*frag; int nextfrag; nextfrag=(int)PI->procfrag; while (nextfrag!=0) { frag=(struct fragfmt*)(nextfrag); frag->prevfrag=0; while (nextfrag!=0) { tempfrag=(struct fragfmt*)(nextfrag); if (tempfrag->final!=frag->final) break ; lastfrag=tempfrag; nextfrag=(int)tempfrag->nextfrag; } lastfrag->nextfrag=0; } PI->procfrag=NULL; PI->curfrag=0; } /*B UnchainFragments*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BStartCommit(int props,int regionlabel) { /*************************************************************************/ /** Starts a fragment commitment phase. Props gives the properties of **/ /** the region which is about to be committed. Regionlabel is a unique **/ /** identifier for the region. **/ /*************************************************************************/ if (Commiting!=0) { BError("B StartCommit: Already in commit phase"); } Commiting=1; ValidPreds=0; ValidLive=ROLIVENESSINFO; LastCommitFrag=0; CommitProps=props; CommitRegion=regionlabel; } /*B StartCommit*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BEndCommit() { /*************************************************************************/ /** Ends the fragment commitment phase. The last fragment to be **/ /** committed is chained onto to the the first fragment in the **/ /** procedure chain which was not committed in the sequence. **/ /** The last fragment to be committed is returned. **/ /*************************************************************************/ struct fragfmt *frag,*tempfrag; struct instrfmt *newinstr,*oldlast; /* check that we are in a committment phase */ if (Commiting==0) { BError("B EndCommit: Not in commit phase"); } if (LastCommitFrag==0) { BError("B EndCommit: No fragments committed"); } Commiting=0; /* if the top level is being committed then the cellar fragments */ /* should be appended onto the end of the fragment chain */ frag=(struct fragfmt*)(LastCommitFrag); if (((CommitProps&RPTOPLEVEL)!=0)&&(PI->cellarfrags!=NULL)) { frag->nextfrag=PI->cellarfrags; tempfrag=PI->cellarfrags; tempfrag->prevfrag=(struct fragfmt *)LastCommitFrag; } /* put a region marker psuedo instruction at the end of the last */ /* fragment which was committed */ newinstr=allocateinstr(); newinstr->group=REGIONINFO; newinstr->issueprops=CommitProps; newinstr->u1.immval=CommitRegion; newinstr->previnstr=frag->lastinstr; newinstr->nextinstr=0; if (frag->lastinstr!=0) { oldlast=frag->lastinstr; oldlast->nextinstr=newinstr; } frag->lastinstr=newinstr; if (frag->firstinstr==0) { frag->firstinstr=newinstr; } /* return the last fragment to be committed */ return LastCommitFrag; } /*B EndCommit*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void MakeFragSkeletal(struct fragfmt *frag) { /*************************************************************************/ /** Gets rid of all information in fragment except that required for **/ /** actual code generation. This function is applied to all committed **/ /** fragments. It reduces the chances of other parts of the code **/ /** generator using invalid fragment information. 
**/ /*************************************************************************/ frag->props&=~(FALLSTHROUGH|FALLENINTO); frag->archinfo=0; frag->uses=0; frag->preds=0; frag->rin=-1; frag->rout=-1; frag->ruse=-1; frag->rdef=-1; #if(ISERFREQUIRED!=0) frag->fin=-1; frag->fout=-1; frag->fuse=-1; frag->fdef=-1; #if(ISERFREQUIRED==2) frag->din=-1; frag->dout=-1; frag->duse=-1; frag->ddef=-1; #endif #if(ISCRFREQUIRED!=0) frag->cin=-1; frag->cout=-1; frag->cuse=-1; frag->cdef=-1; #endif; #endif; } /*MakeFragSkeletal*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void SeqCommit(struct fragfmt *startfrag,struct fragfmt *endfrag) { /*****************************************************************************/ /** Appends a sequence of fragments between startfrag and endfrag **/ /** (inclusive) onto the end of the last fragment to be committed. All the **/ /** necessary housekeeping is performed. **/ /*****************************************************************************/ struct fragfmt *tempfrag; struct instrfmt *newinstr,*oldfirst; int nextfrag; /* append the sequence onto the end of the chain */ if (LastCommitFrag!=0) { tempfrag=(struct fragfmt*)(LastCommitFrag); tempfrag->nextfrag=startfrag; startfrag->prevfrag=(struct fragfmt *)LastCommitFrag; } else if ((CommitProps&RPINCELLAR)!=0) { if (PI->cellarfrags==NULL) { PI->cellarfrags=startfrag; startfrag->prevfrag=0; } else { nextfrag=(int)PI->cellarfrags; do { tempfrag=(struct fragfmt*)(nextfrag); nextfrag=(int)tempfrag->nextfrag; } while (!(nextfrag==0)) ; tempfrag->nextfrag=startfrag; startfrag->prevfrag=tempfrag; } } else { PI->procfrag=startfrag; startfrag->prevfrag=0; }; endfrag->nextfrag=0; /* create a region information psuedo instruction if this is */ /* the first fragment to be committed in the current phase */ if (LastCommitFrag==0) { tempfrag=startfrag; while ((tempfrag->final!=tempfrag)&&(tempfrag->nextfrag!=NULL)) { tempfrag=tempfrag->nextfrag; } newinstr=allocateinstr(); newinstr->group=REGIONINFO; newinstr->issueprops=CommitProps|RPSTART; newinstr->u1.immval=CommitRegion; newinstr->previnstr=0; newinstr->nextinstr=tempfrag->firstinstr; if (tempfrag->firstinstr!=0) { oldfirst=tempfrag->firstinstr; oldfirst->previnstr=newinstr; } tempfrag->firstinstr=newinstr; if (tempfrag->lastinstr==0) { tempfrag->lastinstr=newinstr; } } /* update the last committed fragment */ LastCommitFrag=(int)endfrag; } /*SeqCommit*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BCommitFrag(struct fragfmt *newfrag) { /**************************************************************************/ /** Appends newfrag on to a chain of fragments which are being committed **/ /** in the current commit phase. 
**/ /**************************************************************************/ struct fragfmt *startfrag,*tempfrag; /* ensure we are actually in a commitment phase */ if (Commiting==0) { BError("B CommitFrag: Not in commit phase"); } /* find the first fragment which has a final pointer to the fragment */ /* to be committed */ tempfrag=newfrag; do { startfrag=tempfrag; if (tempfrag->prevfrag==0) break ; tempfrag=(struct fragfmt*)(tempfrag->prevfrag); } while (!(tempfrag->final!=newfrag)) ; /* clear the instructions from each fragment to be committed */ /* and make them skeletal if the region commitment is unrestricted */ tempfrag=startfrag; while (tempfrag->final==newfrag) { MakeFragSkeletal(tempfrag); if ((CommitProps&RPGISRESTRICTED)==0) { tempfrag->labref=0; tempfrag->labrefinstr=NULL; tempfrag->firstinstr=0; tempfrag->lastinstr=0; } if (tempfrag->nextfrag==NULL) break ; tempfrag=tempfrag->nextfrag; } /* now actually commit the fragments */ SeqCommit(startfrag,newfrag); } /*B CommitFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BCommitSequence(struct fragfmt *startfrag,struct fragfmt *endfrag) { /*****************************************************************************/ /** Appends a sequence of fragments between startfrag and endfrag **/ /** (inclusive) onto the end of the last fragment to be committed. All the **/ /** necessary housekeeping is performed. **/ /*****************************************************************************/ struct fragfmt *tempfrag; int origstart; /* ensure we are actually in a commitment phase */ if (Commiting==0) { BError("B CommitSequence: Not in commit phase"); } /* find the first fragment which has a final pointer to the fragment */ /* to be committed */ origstart=(int)startfrag; tempfrag=startfrag; do { startfrag=tempfrag; if (tempfrag->prevfrag==0) break ; tempfrag=(struct fragfmt*)(tempfrag->prevfrag); } while (!((int)tempfrag->final!=origstart)) ; /* actually commit the fragments */ SeqCommit(startfrag,endfrag); } /*B CommitSequence*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct fragfmt *BInsertFrag(struct fragfmt *insertfrag) { /*************************************************************************/ /** Inserts an empty fragment before the given one and returns a **/ /** pointer to the new fragment. If the given fragment has other label **/ /** fragments associated with it the new fragment inserted before them. **/ /** If the insertion point is given as 0 then the created fragment is **/ /** left floating - not a member of any fragment chain. The new **/ /** fragment is given the same liveness information as that from the **/ /** entry liveness to the given fragment. If the fragments have been **/ /** made unchained with B UnchainFragments then a floating fragment is **/ /** returned. 
**/ /*************************************************************************/ struct fragfmt *frag,*newfrag,*tempfrag,*origfrag,*finalfrag,*oldfrag; int alloc; /* find the first fragment which refers to the given one as its */ /* final fragment */ if (insertfrag!=NULL) { frag=insertfrag; origfrag=frag; finalfrag=insertfrag; do { oldfrag=frag; if (frag->prevfrag==NULL) break ; frag=frag->prevfrag; } while (!(frag->final!=finalfrag)) ; frag=oldfrag; } /* if the free fragment chain is exhausted then create some new entries */ if (freefrag [PI->proclevel]==0) { createfreefrags(); } /* clear the new fragment, remove it from the free fragment chain */ /* and chain it before the given fragment set */ newfrag=(struct fragfmt*)(freefrag [PI->proclevel]); if (newfrag->prevfrag!=(struct fragfmt *)FREEFRAGMARKER) { BError("B InsertFrag: Bad fragment structure"); } freefrag [PI->proclevel]=(int)newfrag->nextfrag; alloc=newfrag->allocchain; memset(newfrag,0,sizeof( struct fragfmt)); newfrag->allocchain=alloc; newfrag->final=newfrag; if ((insertfrag!=NULL)&&(PI->procfrag!=NULL)) { newfrag->prevfrag=frag->prevfrag; newfrag->nextfrag=frag; frag->prevfrag=newfrag; if (newfrag->prevfrag!=NULL) { tempfrag=newfrag->prevfrag; tempfrag->nextfrag=newfrag; } } /* we set the control flow flags and register liveness information */ /* if the new fragment was inserted at a specific point */ if (insertfrag!=NULL) { /* determine the properties and usage for the new fragment */ if ((origfrag->props&FALLENINTO)!=0) { newfrag->props|=FALLENINTO|FALLSTHROUGH; newfrag->uses=1; } /* generate the register liveness information for the new fragment */ newfrag->rin=origfrag->rin; newfrag->rout=origfrag->rin; #if(ISERFREQUIRED!=0) newfrag->fin=origfrag->fin; newfrag->fout=origfrag->fin; #if(ISERFREQUIRED==2) newfrag->din=origfrag->din; newfrag->dout=oricfrag->dout; #endif #if(ISCRFREQUIRED!=0) newfrag->cin=origfrag->cin; newfrag->cout=origfrag->cin; #endif; #endif; } /* print tracing information */ if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": new frag at %8x inserted before frag at %8x\n",(unsigned)newfrag,(unsigned)insertfrag); } /* return the address of the newly created fragment */ return newfrag; } /*B InsertFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct fragfmt *BObtainFrag() { /****************************************************************/ /** Create a new unlinked code fragment **/ /****************************************************************/ struct fragfmt *frag; int alloc; /* if the free fragment chain is exhausted then create some new entries */ if (freefrag [PI->proclevel]==0) { createfreefrags(); } /* clear the new fragment and remove it from the free fragment chain */ frag=(struct fragfmt*)(freefrag [PI->proclevel]); if (frag->prevfrag!=(struct fragfmt *)FREEFRAGMARKER) { BError("B ObtainFrag: Bad fragment structure"); } freefrag [PI->proclevel]=(int)frag->nextfrag; alloc=frag->allocchain; memset(frag,0,sizeof( struct fragfmt)); frag->allocchain=alloc; /* print tracing information */ if ((TraceEnab!=0)&&(MemTrace!=0)) { printf(": new frag at %8x\n",(int)frag); } return frag; } /*B ObtainFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BBreakFrag(int label) { /****************************************************************/ /** Terminate the current fragment. **/ /** If label is non-zero the last non delay slot instruction **/ /** references it. 
**/
/****************************************************************/
  struct fragfmt *frag;
  struct instrfmt *lastinstr,*instr;
  if ((TraceEnab!=0)&&(MemTrace!=0)) {
    printf(": frag at %8x broken with label %d\n",(unsigned)PI->curfrag,label);
  }
  ValidPreds=0;
  frag=(struct fragfmt*)(PI->curfrag);
  if ((label!=0)&&(frag->lastinstr!=0)) {
    frag->labref=BLocateLabel(label,0);
    lastinstr=frag->lastinstr;
    if ((lastinstr->props&DELAYSLOTINSTR)!=0) {
      frag->labrefinstr=lastinstr->previnstr;
    } else {
      frag->labrefinstr=frag->lastinstr;
    }
  }
  /* registers used in the fragment must be included in the total set of */
  /* registers used in the procedure */
  instr=frag->firstinstr;
  while (instr!=NULL) {
    if (instr->grouprdef=PI->rdef|instr->rdef;
      PI->rref=PI->rref|instr->rref;
#if(ISERFREQUIRED!=0)
      PI->fdef=PI->fdef|instr->fdef;
      PI->fref=PI->fref|instr->fref;
#if(ISERFREQUIRED==2)
      PI->ddef=PI->ddef|instr->ddef;
      PI->dref=PI->dref|instr->dref;
#endif
#if(ISCRFREQUIRED!=0)
      PI->cdef=PI->cdef|instr->cdef;
      PI->cref=PI->cref|instr->cref;
#endif;
#endif;
    }
    instr=instr->nextinstr;
  }
  BNewFrag();
} /*B BreakFrag*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
void BTerminateFrag(int labindex,int dsinstr) {
/****************************************************************/
/** Terminates the current fragment. The fragment ends with a **/
/** branch to the given label index. If dsinstr is non-zero   **/
/** then the branch has a delay slot instruction associated   **/
/** with it.                                                  **/
/****************************************************************/
  struct lrecfmt *lrec;
  struct instrfmt *instr;
  if (dsinstr!=0) {
    instr=BCurInstr();
    instr->props|=DELAYSLOTINSTR;
  }
  lrec=BLabRecAd(labindex);
  BBreakFrag(lrec->labnum);
} /*B TerminateFrag*/
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/
/**/
/***/
void BFreeAllocation() {
/******************************************************************/
/** Frees all the fragment, instruction and predecessor         **/
/** descriptors allocated at the current procedural level. This **/
/** mechanism allows the freeing of items even if they become   **/
/** floating due to not being committed by the global scheduler.
**/ /******************************************************************/ struct fragfmt *frag; struct instrfmt *instr; struct preddescfmt *pred; int curitem,free; /* print tracing if required */ if ((TraceEnab!=0)&&(MemTrace!=0)) { printf("\n",PI->proclevel); } /* make all items in the fragment allocation chain free */ /* However frags with global labels need to be kept */ /* see M tidy end for handling of global labels */ free=0; curitem=fragalloc [PI->proclevel]; while (curitem!=0) { frag=(struct fragfmt*)(curitem); if ((frag->props&GLOBALLABFRAG)==0) { frag->prevfrag=(struct fragfmt *)FREEFRAGMARKER; frag->nextfrag=(struct fragfmt *)free; free=curitem; } curitem=frag->allocchain; } freefrag [PI->proclevel]=free; /* make all items in the instruction allocation chain free */ free=0; curitem=instralloc [PI->proclevel]; while (curitem!=0) { instr=(struct instrfmt*)(curitem); instr->previnstr=(struct instrfmt *)FREEINSTRMARKER; instr->nextinstr=(struct instrfmt *)free; free=curitem; curitem=instr->allocchain; } freeinstr [PI->proclevel]=(struct instrfmt *)free; /* make all items in the predecessor descriptor allocation chain free */ free=0; curitem=predalloc [PI->proclevel]; while (curitem!=0) { pred=(struct preddescfmt*)(curitem); pred->frag=FREEPREDMARKER; pred->next=free; free=curitem; curitem=pred->allocchain; } freepred [PI->proclevel]=free; } /*B FreeAllocation*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ /***/ /********************************************************************************/ /** BACKEND OPTIMISER ENTRY ROUTINES **/ /********************************************************************************/ /***/ void BSetPI(int PIaddr) { /********************************************************************************/ /** Sets a pointer to the information record for the procedure on which **/ /** instruction, fragment and label chaining operations are currently being **/ /** carried out **/ /********************************************************************************/ PI=(struct procfmt*)(PIaddr); SetVectors(0); ValidLive=NOLIVENESSINFO; ValidPreds=0; BeenReorg=0; } /*B SetPI*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BSetOptions(int TraceVect,int InhVect,int Proc,int FragLow,int FragHigh) { /********************************************************************************/ /** Sets the tracing and inhibition options for the backend optimiser. Also **/ /** sets an optional fragment range and procedure in which tracing or **/ /** inhibition is selected. 
**/ /** **/ /** TraceVect contains the following bits: **/ /** Bit 0: Enable memory block allocation tracing **/ /** Bit 1: Enable fragment reorganisation tracing **/ /** Bit 2: Enable terse basic block scheduler tracing **/ /** Bit 3: Enable verbose basic block scheduler tracing **/ /** Bit 4: Enable buddy allocator tracing **/ /** Bit 5: Enable extended fragment reorganisation tracing **/ /** Bit 6: Enable procedure entry name tracing **/ /** Bit 7: Enable fragment and instruction allocation tracing **/ /** Bit 8: Enable pre and post global scheduling fragment tracing **/ /** Bit 9: Enable extended fragment instruction tracing **/ /** **/ /** InhVect contains the following bits: **/ /** Bit 0: Disables basic block scheduler **/ /** Bit 1: Disables delay slot filling **/ /** Bit 2: Disables switch and return improvement **/ /** Bit 3: Disables live register analysis (global inhibition only) **/ /** Bit 4: Disables predecessor list generation (global inhibition only) **/ /** Bit 5: Disables global scheduling (global inhibition only) **/ /** Bit 6: Disables fragment reorganisation 1 **/ /** Bit 7: Disables fragment reorganisation 2 **/ /** Bit 8: Disables fragment reorganisation 3 **/ /** Bit 9: Disables fragment reorganisation 4 **/ /** Bit 10: Disables fragment reorganisation 5 **/ /** Bit 11: Disables fragment reorganisation 6 **/ /** Bit 12: Disables fragment reorganisation 7 **/ /** Bit 13: Disables fragment reorganisation 8 **/ /** Bit 14: Disables fragment reorganisation 9 **/ /** Bit 15: Disables global scheduling for any given procedure and all **/ /** subsequent procedures **/ /** Bit 16: Disables global scheduling for any given region and all higher **/ /** numbered regions in the selected procedure **/ /** **/ /** If Proc is zero then the inhibition/tracing applies to all fragments in **/ /** all procedures, otherwise it only corresponds to the numbered procedure. **/ /** If Proc is non-zero and FragLow is non-zero and FragHigh >= FragLow then **/ /** the inhibition/tracing only applies to the given range of fragments in the **/ /** given procedure. **/ /********************************************************************************/ BlkTrace=TraceVect&1; FragReport=TraceVect&2; SchReport=TraceVect&12; VerSchReport=TraceVect&8; BuddyTrace=TraceVect&16; ExtFragReport=TraceVect&32; EntryTrace=TraceVect&64; MemTrace=TraceVect&128; GISFragReport=TraceVect&256; ExtInstrTrace=TraceVect&512; InhibVect=InhVect; RangeProc=Proc; RangeFragLow=FragLow; RangeFragHigh=FragHigh; } /*B SetOptions*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
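All Rights Reserved.*/
/**/
/***/
/* B SetOptions above receives its tracing and inhibition controls as packed  */
/* bit vectors and unpacks them with simple masks.  The sketch below shows    */
/* that decoding for a handful of the documented trace bits; the flag names   */
/* are invented here, only the bit positions follow the table above.          */
#if 0
#include <stdio.h>

#define TRBLK   (1<<0)    /* memory block allocation tracing                  */
#define TRFRAG  (1<<1)    /* fragment reorganisation tracing                  */
#define TRSCHT  (1<<2)    /* terse basic block scheduler tracing              */
#define TRSCHV  (1<<3)    /* verbose basic block scheduler tracing            */
#define TRBUDDY (1<<4)    /* buddy allocator tracing                          */

/* a flag is "on" whenever its bit is set; verbose scheduler tracing also     */
/* turns on the terse form, as in the decoding done by B SetOptions           */
static void decodetrace(int tracevect)
{
  int blk=tracevect&TRBLK;
  int frag=tracevect&TRFRAG;
  int sch=tracevect&(TRSCHT|TRSCHV);
  int versch=tracevect&TRSCHV;
  int buddy=tracevect&TRBUDDY;
  printf("blk=%d frag=%d sch=%d versch=%d buddy=%d\n",
         blk!=0,frag!=0,sch!=0,versch!=0,buddy!=0);
}

int main(void)
{
  decodetrace(TRBLK|TRSCHV);   /* bits 0 and 3: block tracing + verbose sched */
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.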
All Rights Reserved.*/ /**/ /***/ void BInitialise() { /********************************************************************************/ /** Initialises the backend optimiser **/ /********************************************************************************/ struct arenainfo *arena; struct lrecfmt *lrec; int I,J; BlkTrace=0; FragReport=0; SchReport=0; VerSchReport=0; BuddyTrace=0; ExtFragReport=0; EntryTrace=0; MemTrace=0; ExtInstrTrace=0; GISFragReport=0; RegionLabel=0; ValidLive=NOLIVENESSINFO; ValidPreds=0; BeenReorg=0; InhibVect=0; RangeProc=0; RangeFragLow=0; RangeFragHigh=0; CurInhibVect=0; ArchVariant=0; GISBuffZone=0; GISFloatOpt=0; GISUnsafeOpt=0; GISTrace=0; GISInhib=0; GISRegion=0; GISAttempt=0; GISPass=0; TraceEnab=1; CurArena=ARCHMalloc(ARENAINFOSIZE); if (CurArena==0) BError("No space for root arena information"); arena=(struct arenainfo*)(CurArena); arena->headchunk=0; arena->tailchunk=0; arena->childarena=0; arena->parentarena=0; for (I=0; I<=19; I++) { fragalloc [I]=0; instralloc [I]=0; predalloc [I]=0; freefrag [I]=0; freeinstr [I]=NULL; freepred [I]=0; } nextlab=33; for (I=1; I<=ublabrecheads; I++) { labrecheads [I]=0; } J=BMalloc(LRECBUCKETSIZE*LRECSIZE,1,MALABADADAREA); if (J==0) BError("No space for label records"); labrecheads [0]=J; for (I=0; I<=32; I++) { lrec=(struct lrecfmt*)(J); memset(lrec,0,sizeof( struct lrecfmt)); J+=LRECSIZE; } } /*B Initialise*/ /***/ void BReinitialise() { /********************************************************************************/ /** Initialises the backend optimiser **/ /** Dont reset the options which are carried over to new generation */ /** Reset the space variables as they are freed in Bterminate */ /********************************************************************************/ struct arenainfo *arena; struct lrecfmt *lrec; int I,J; RegionLabel=0; ValidLive=NOLIVENESSINFO; ValidPreds=0; BeenReorg=0; CurInhibVect=0; GISBuffZone=0; GISFloatOpt=0; GISUnsafeOpt=0; GISTrace=0; GISInhib=0; GISRegion=0; GISAttempt=0; GISPass=0; /**/ CurArena=ARCHMalloc(ARENAINFOSIZE); if (CurArena==0) BError("No space for root arena information"); arena=(struct arenainfo*)(CurArena); arena->headchunk=0; arena->tailchunk=0; arena->childarena=0; arena->parentarena=0; /**/ for (I=0; I<=19; I++) { fragalloc [I]=0; instralloc [I]=0; predalloc [I]=0; freefrag [I]=0; freeinstr [I]=NULL; freepred [I]=0; } /**/ nextlab=33; for (I=0; I<=ublabrecheads; I++) { labrecheads [I]=0; } J=BMalloc(LRECBUCKETSIZE*LRECSIZE,1,MALABADADAREA); if (J==0) BError("No space for label records"); labrecheads [0]=J; for (I=0; I<=32; I++) { lrec=(struct lrecfmt*)(J); memset(lrec,0,sizeof( struct lrecfmt)); J+=LRECSIZE; } } /*B Reinitialise*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BTerminate() { /********************************************************************************/ /** Terminates the backend optimiser freeing any dynamic memory owned by it **/ /********************************************************************************/ struct arenainfo *arena; struct memchunk *chunk; int nextchunk; /* free any memory owned by the buddy allocator */ while (CurArena!=0) { arena=(struct arenainfo*)(CurArena); nextchunk=arena->headchunk; while (nextchunk!=0) { chunk=(struct memchunk*)(nextchunk); nextchunk=chunk->nextchunk; ARCHFree((int)chunk); } CurArena=arena->parentarena; ARCHFree((int)arena); } } /*B Terminate*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
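All Rights Reserved.*/
/**/
/***/
/* B Terminate above releases everything the module still owns by walking the */
/* arena chain through the parent links and freeing each arena's chunk list   */
/* on the way.  The standalone sketch below reproduces that teardown order    */
/* with invented toy types and plain free() in place of ARCH Free.            */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toychunk { struct toychunk *next; };
struct toyarena { struct toychunk *head; struct toyarena *parent; };

/* free every chunk of every arena, following the parent chain back to the   */
/* root; each chunk's link is read before the chunk itself is released       */
static void teardown(struct toyarena *cur)
{
  while (cur!=NULL) {
    struct toyarena *parent=cur->parent;
    struct toychunk *c=cur->head;
    while (c!=NULL) {
      struct toychunk *next=c->next;
      free(c);
      c=next;
    }
    free(cur);
    cur=parent;
  }
}

int main(void)
{
  struct toyarena *root=calloc(1,sizeof(*root));
  struct toyarena *child=calloc(1,sizeof(*child));
  struct toychunk *c1=calloc(1,sizeof(*c1)),*c2=calloc(1,sizeof(*c2));
  child->parent=root;
  c1->next=c2; child->head=c1;          /* the child arena owns two chunks    */
  teardown(child);
  printf("all arenas and chunks released\n");
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.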
All Rights Reserved.*/ /**/ /***/ void BChangeDest(struct fragfmt *frag,struct instrfmt *instr,struct lrecfmt *lrec) { /********************************************************************************/ /** The given instruction which uses a label operand is rewritten to use the **/ /** new label. **/ /********************************************************************************/ int labindex; labindex=BLocateLabel(lrec->labnum,0); instr->u1.immval=labindex; if ((frag->labref!=0)&&(frag->labrefinstr==instr)) { frag->labref=labindex; } } /*B ChangeDest*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BCheckOpCode(struct instrfmt *instr,int opcode) { /********************************************************************************/ /** Returns a non-zero value if the given instruction has the given opcode. It **/ /** is used by the global scheduler for locating special instructions whose **/ /** opcodes are given in the processor model. **/ /********************************************************************************/ if ((instr->groupopcode==opcode)) { return 1; } else { return 0; } } /*B CheckOpCode*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BMarkDSInstr(struct instrfmt *instr,int isinslot) { /********************************************************************************/ /** Marks the given instruction as one which appears in a delay slot if **/ /** isinslot is non-zero else marks the instruction as not in a delay slot **/ /********************************************************************************/ if (isinslot!=0) { instr->props|=DELAYSLOTINSTR; } else { instr->props&=~DELAYSLOTINSTR; } } /*B MarkDSInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BMarkDSOwner(struct instrfmt *instr,int unused,int isannulled,int annultaken) { /********************************************************************************/ /** Marks the given instruction as a delay slot owner (or potential owner). **/ /** The owner may also be marked to annul the delay slot if the branch is **/ /** taken or untaken. **/ /********************************************************************************/ if (unused!=0) { /* if the delay slot is unused then work out if that means it doesn't */ /* actually exist or is a NOP */ #if(DELAYSLOTMUSTEXIST!=0) instr->props&=~(ANNULLED|ANNULLEDTAKEN); instr->props|=DELAYSLOTEXISTS|DELAYSLOTUNUSED; #else instr->props&=~(DELAYSLOTEXISTS|ANNULLED|ANNULLEDTAKEN); instr->props|=DELAYSLOTUNUSED; #endif; } else { /* the delay slot is used so mark its annul properties */ instr->props|=DELAYSLOTEXISTS; instr->props&=~(ANNULLED|ANNULLEDTAKEN|DELAYSLOTUNUSED); if (isannulled!=0) { if (annultaken!=0) { instr->props|=ANNULLED|ANNULLEDTAKEN; } else { instr->props|=ANNULLED; } } } } /*B MarkDSOwner*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
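All Rights Reserved.*/
/**/
/***/
/* B MarkDSOwner above records the delay slot state of a branch purely in     */
/* property bits.  The sketch below mirrors those marking rules on invented   */
/* flag names and fixes itself to the case where a delay slot must always     */
/* exist; the real routine switches on DELAYSLOTMUSTEXIST at compile time.    */
#if 0
#include <stdio.h>

#define SLOTEXISTS   (1<<0)   /* property bits local to this sketch           */
#define SLOTUNUSED   (1<<1)
#define SLOTANNUL    (1<<2)
#define SLOTANNULTKN (1<<3)

/* an unused slot keeps no annul bits, a used one records whether it is      */
/* annulled and, if so, whether on the taken path                             */
static int markowner(int props,int unused,int annulled,int annultaken)
{
  if (unused) {
    props&=~(SLOTANNUL|SLOTANNULTKN);
    props|=SLOTEXISTS|SLOTUNUSED;        /* assumes the slot must exist       */
  } else {
    props|=SLOTEXISTS;
    props&=~(SLOTANNUL|SLOTANNULTKN|SLOTUNUSED);
    if (annulled) props|=annultaken ? (SLOTANNUL|SLOTANNULTKN) : SLOTANNUL;
  }
  return props;
}

int main(void)
{
  printf("unused slot      -> %x\n",markowner(0,1,0,0));
  printf("annulled untaken -> %x\n",markowner(0,0,1,0));
  printf("annulled taken   -> %x\n",markowner(0,0,1,1));
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.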
All Rights Reserved.*/ /**/ /***/ int BGetArchInfo(struct fragfmt *frag) { /********************************************************************************/ /** Obtains the architecture specific information word for the given fragment **/ /********************************************************************************/ return frag->archinfo; } /*B GetArchInfo*/ /***/ int BGetEdgeList(struct fragfmt *frag) { /********************************************************************************/ /** Obtains the head of the predecessor edge list for the given fragment or 0 **/ /** if the list is empty. Ignores edges associated with dense switches. **/ /********************************************************************************/ struct preddescfmt *edge; int curedge; if (ValidPreds==0) { BError("B GetEdgeList: No valid edge information"); } curedge=frag->preds; while (curedge!=0) { edge=(struct preddescfmt*)(curedge); if (edge->isswitchedge==0) return curedge; curedge=edge->next; } return 0; } /*B GetEdgeList*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BGetEdgeInfo(struct preddescfmt *edge,int *frag,int *isforward) { /********************************************************************************/ /** Extracts the information associated with the given predecessor edge **/ /** descriptor. Frag is set to the fragment from which the edge emanates and **/ /** isforward is set to non-zero iff is a forward edge in the control flow **/ /** graph. Edges associated with dense switch jumps are ignored. **/ /********************************************************************************/ *frag=edge->frag; *isforward=edge->isforward; } /*B GetEdgeInfo*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ #if (Target==SPARC || Target==M88K || Target==MIPS) /* If Target has delayed branches */ /***/ static void FillDelaySlots(int startfrag,int prelimpass) { /********************************************************************************/ /** Goes through the fragments starting with the given one and fills as many **/ /** empty delay slots as possible. If prelimpass is non-zero then delay slots **/ /** are not filled but any which are to be annulled for special delay slot **/ /** sequences are marked so that the delay slots are not filled by GIS. **/ /********************************************************************************/ struct fragfmt *frag; struct instrfmt *instr; int nextf,nexti; /* go through each fragment attempting to fill empty delay slots */ nextf=startfrag; while (nextf!=0) { frag=(struct fragfmt*)(nextf); if ((frag->props&((USERASSEMFRAG|DSWITCHJUMPFRAG)|NOOPTIMFRAG))==0) { /* perform if/then optimisations to improve code for */ /* processors with annulled delay slots */ if (prelimpass==0) PerformIfThenOpt(frag); PerformIfThenElseOpt(frag,prelimpass); PerformUncondBranchOpt(frag,prelimpass); PerformBranchOverOpt(frag,prelimpass); /* go through the fragment and attempt to fill any empty delay slots */ if (prelimpass==0) { nexti=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nexti!=0) { instr=(struct instrfmt*)(nexti); if (((instr->props&DELAYSLOTUNUSED)!=0)&&((instr->privprops&DSWITCHJUMPINSTR)==0)) { ARCHFillDelaySlot(frag,instr); } nexti=(int)instr->nextinstr; } } } nextf=(int)frag->nextfrag; } } /*FillDelaySlots*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
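All Rights Reserved.*/
/**/
/***/
/* B GetEdgeList above returns the head of a fragment's predecessor list but  */
/* skips the edges recorded for dense switch jumps.  The sketch below is a    */
/* self-contained model of that filter; the edge type and its fields are      */
/* invented and stand in for the module's preddescfmt descriptors.            */
#if 0
#include <stdio.h>

struct toyedge { struct toyedge *next; int fromfrag; int isswitchedge; };

/* return the first predecessor edge that did not come from a dense switch   */
/* jump, or null if only switch edges remain                                  */
static struct toyedge *firstrealedge(struct toyedge *e)
{
  while (e!=NULL) {
    if (e->isswitchedge==0) return e;
    e=e->next;
  }
  return NULL;
}

int main(void)
{
  struct toyedge e3={NULL,30,0};          /* ordinary branch edge             */
  struct toyedge e2={&e3,20,1};           /* dense switch edge                */
  struct toyedge e1={&e2,10,1};           /* dense switch edge                */
  struct toyedge *e=firstrealedge(&e1);
  if (e!=NULL) printf("first real predecessor is fragment %d\n",e->fromfrag);
  return 0;
}
#endif
/***/
/**/
/* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.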
All Rights Reserved.*/ #endif /**/ /***/ int BGetFixedLabel(struct fragfmt *frag) { /********************************************************************************/ /** Returns a pointer to the descriptor for any addressed or label destination **/ /** label defined at the start of the given fragment. Returns 0 if there is no **/ /** such label. **/ /********************************************************************************/ struct fragfmt *curfrag; int prevfrag; /* determine if there are fixed labels or dense switch case */ /* destinations associated with the fragment */ curfrag=frag; while (curfrag->final==frag->final) { if ((curfrag->labindex!=0)&&((ARCHIsAddrLabel(curfrag->labindex)!=0)||((curfrag->props&DSWITCHCASEFRAG )!=0))) { return (int)BLabRecAd(curfrag->labindex); } prevfrag=(int)curfrag->prevfrag; if (prevfrag==0) break ; curfrag=(struct fragfmt*)(prevfrag); } return 0; } /*B GetFixedLabel*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BMoveDelaySlot(struct fragfmt *branchfrag,struct instrfmt *branchinstr) { /********************************************************************************/ /** Moves a delay slot instruction, whose owner is a fragment terminator, to **/ /** the destination of the branch. If there are other references to the **/ /** fragment then appropriate jumps are inserted to bypass the displaced delay **/ /** slot instruction. Use of this function destroys the current liveness **/ /** information. **/ /********************************************************************************/ struct fragfmt *destfrag,*newfrag,*curfrag,*bypassfrag; struct instrfmt *delayinstr,*newinstr,*newnop; struct lrecfmt *lrec; struct preddescfmt *edge,*tempedge; int newlabel,labindex,singleinstrcase,nextf,prevedge,nextedge,junk; /* test the consistency of the fragment and instruction */ if (ValidPreds==0) { BError("B MoveDelaySlot: No edge information"); } if (((branchinstr->props&DELAYSLOTEXISTS)==0)||((branchinstr->props&DELAYSLOTUNUSED)!=0)|| (BMachineInstr(branchinstr->nextinstr,NEXTCHAIN)==NULL)|| (branchfrag->labref==0)||(branchfrag->labrefinstr!=branchinstr)) { BError("B MoveDelaySlot: Illegal branch instruction"); } /* find the destination of the branch and the edge which represents it */ lrec=BLabRecAd(branchfrag->labref); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); if (destfrag->preds==0) { BError("B MoveDelaySlot: Inconsistent edge information"); } delayinstr=BMachineInstr(branchinstr->nextinstr,NEXTCHAIN); edge=(struct preddescfmt*)(destfrag->preds); /* handle the cases for a single edge flowing into the destination and */ /* multiple edges separately */ if ((edge->next==0)&&((destfrag->props&FALLENINTO)==0)) { /* the branch we are modifying is the only way into the fragment so */ /* just move the delay slot instruction to the destination */ if (edge->frag!=(int)branchfrag) { BError("B MoveDelaySlot: Corrupted edge information"); } BMoveBefore(branchfrag,destfrag,delayinstr,destfrag->firstinstr); delayinstr->props&=~DELAYSLOTINSTR; } else { /* there are multiple edges flowing into the destination fragment so */ /* we have to create a brand new fragment to hold the solitary delay */ /* slot instruction */ newfrag=BInsertFrag(destfrag); newfrag->props|=FALLSTHROUGH; destfrag->props|=FALLENINTO; branchfrag->props&=~RETURNBRANCH; /* detect the case where we use the delay slot annulment facility to */ /* efficiently handle jumping over a single instruction - this is */ /* handled 
specially to try and generate more sensible code */ singleinstrcase=0; if (((branchinstr->props&ANNULLED)!=0)&&((branchinstr->props&CONDITIONALBRANCH)==0)&&((branchfrag->props &FALLSTHROUGH)!=0)) { /* check that if the branch is not taken then it merely falls */ /* through into the branch destination - i.e. the delay slot is */ /* the only conditional instruction */ nextf=(int)branchfrag->nextfrag; while (nextf!=0) { curfrag=(struct fragfmt*)(nextf); if (nextf==(int)destfrag) { singleinstrcase=1; break ; } if (BMachineInstr(curfrag->firstinstr,NEXTCHAIN)!=NULL) break ; nextf=(int)curfrag->nextfrag; } } /* handle the special and general cases separately */ if (singleinstrcase!=0) { /* handle the special case by simply moving the delay slot */ /* instruction into a fragment by itself and inverting the branch */ /* condition if the annulment was if the branch was untaken */ BMoveBefore(branchfrag,newfrag,delayinstr,newfrag->firstinstr); delayinstr->props&=~DELAYSLOTINSTR; if (((branchinstr->props&ANNULLED)!=0)&&((branchinstr->props&ANNULLEDTAKEN)==0)) { junk=ARCHInvertBranch(branchinstr,0,0); } } else { /* handle the general case - we obtain a new entry label for the */ /* new fragment and make the branch go to the new fragment */ newlabel=ARCHAskForLabelNum(); /* actually move the delay slot instruction into the new fragment */ BMoveBefore(branchfrag,newfrag,delayinstr,newfrag->firstinstr); delayinstr->props&=~DELAYSLOTINSTR; /* actually create a new label descriptor associated with the */ /* fragment and make the branch point to it */ labindex=BLocateLabel(newlabel,0); lrec=BLabRecAd(labindex); lrec->frag=newfrag; newfrag->labindex=labindex; BChangeDest(branchfrag,branchinstr,lrec); /* find the edge descriptor representing the branch and move */ /* it so that it is associated with the new fragment */ prevedge=0; nextedge=destfrag->preds; while (nextedge!=0) { edge=(struct preddescfmt*)(nextedge); if (edge->frag==(int)branchfrag) break ; prevedge=nextedge; nextedge=edge->next; } if (nextedge==0) { BError("B MoveDelaySlot: Edge descriptor missing"); } if (prevedge==0) { destfrag->preds=edge->next; } else { tempedge=(struct preddescfmt*)(prevedge); tempedge->next=edge->next; } edge->next=0; newfrag->preds=(int)edge; newfrag->uses=newfrag->uses+1; destfrag->uses=destfrag->uses-1; /* if the new fragment is fallen into then we have to generate */ /* an unconditional branch to bypass the displaced delay slot */ /* instruction */ if ((newfrag->props&FALLENINTO)!=0) { newinstr=(struct instrfmt*)(ARCHCreateUncond(destfrag->labindex)); /* if the fragment which falls into the new one itself ends */ /* in a branch then we have to create yet another new fragment */ /* to hold the unconditional branch */ bypassfrag=BPrevFrag(newfrag); if (bypassfrag->labref!=0) { bypassfrag=BInsertFrag(newfrag); } /* move the unconditional branch into the bypass fragment */ BMoveAfter(NULL,bypassfrag,newinstr,bypassfrag->lastinstr); bypassfrag->labref=destfrag->labindex; bypassfrag->labrefinstr=newinstr; AddPredFrag(0,1,0,destfrag,bypassfrag); destfrag->uses=destfrag->uses+1; bypassfrag->props&=~FALLSTHROUGH; newfrag->props&=~FALLENINTO; /* if the unconditional branch must have a delay slot */ /* instruction then we insert a NOP after it */ if ((DELAYSLOTMUSTEXIST!=0)&&((newinstr->props&DELAYSLOTUNUSED)!=0)) { newinstr->props|=DELAYSLOTEXISTS; newnop=(struct instrfmt*)(ARCHCreateNOP()); newnop->props|=DELAYSLOTINSTR; BMoveAfter(NULL,bypassfrag,newnop,bypassfrag->lastinstr); } } } } /* we now clear up the original branch 
instruction */ #if(DELAYSLOTMUSTEXIST!=0) /* if delay slot instructions after branches are definitely */ /* required then we insert a NOP instruction */ newinstr=(struct instrfmt*)(ARCHCreateNOP()); newinstr->props|=DELAYSLOTINSTR; BMoveAfter(NULL,branchfrag,newinstr,branchinstr); branchinstr->props&=~(ANNULLED|ANNULLEDTAKEN); branchinstr->props|=DELAYSLOTUNUSED; #else /* if a delay slot instruction is not necessary then we just mark */ /* the branch as having an unused potential delay slot */ branchinstr->props&=~(DELAYSLOTEXISTS|ANNULLED|ANNULLEDTAKEN); branchinstr->props|=DELAYSLOTUNUSED; #endif; } /*B MoveDelaySlot*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BGetFragDest(struct fragfmt *frag) { /********************************************************************************/ /** Returns the destination label of any terminator instruction associated **/ /** with the given fragment, or 0 if there is no label branching terminator **/ /********************************************************************************/ struct fragfmt *destfrag; struct lrecfmt *lrec; if (frag->labref!=0) { lrec=BLabRecAd(frag->labref); if ((lrec->labnum>=PI->labadjust)&&(lrec->frag!=NULL)) { destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); if ((destfrag->props&PRIVATELABFRAG)!=0) return 0; return (int)lrec; } } return 0; } /*B GetFragDest*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BGetInstrEnds(struct fragfmt *frag,int *head,int *tail) { /********************************************************************************/ /** Obtains the head and tail of the real instruction chain associated with **/ /** the given fragment. Head is set to the first instruction in the chain and **/ /** tail to the last instruction in the chain. Head and tail are both set to 0 **/ /** if the fragment is not associated with any executable instructions. **/ /********************************************************************************/ *head=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); *tail=(int)BMachineInstr(frag->lastinstr,PREVCHAIN); } /*B GetInstrEnds*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BGetLabInfo(struct lrecfmt *label,int *labnum,int *frag) { /********************************************************************************/ /** Obtains the information associated with the given label. Labnum is set to **/ /** the label and frag is set point to the fragment associated with the **/ /** definition point of the label. **/ /********************************************************************************/ struct fragfmt *tempfrag; if (label->frag==NULL) { BError("B GetLabInfo: Label is not defined"); } *labnum=label->labnum; tempfrag=label->frag; *frag=(int)tempfrag->final; } /*B GetLabInfo*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BGetOpCode(struct instrfmt *instr) { /********************************************************************************/ /** Returns the operation code associated with the given instruction or -1 if **/ /** it is not an executable instruction **/ /********************************************************************************/ if (instr->groupopcode>>1; #endif; return instr->opcode; } else { return -1; } } /*B GetOpCode*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ int BGetTerminator(struct fragfmt *frag) { /********************************************************************************/ /** Returns the terminator instruction for the given fragment or 0 if the **/ /** fragment is not associated with a terminator **/ /********************************************************************************/ struct instrfmt *instr; if (frag->labref!=0) { if (frag->labrefinstr==0) return 0; instr=frag->labrefinstr; if (instr->groupprops&RETURNFRAG)!=0)||((frag->props&DSWITCHJUMPFRAG)!=0)) { if (BMachineInstr(frag->lastinstr,PREVCHAIN)!=NULL) { instr=BMachineInstr(frag->lastinstr,PREVCHAIN); if (((instr->props&DELAYSLOTINSTR)!=0)&& (BMachineInstr(instr->previnstr,PREVCHAIN)!=NULL)) { instr=BMachineInstr(instr->previnstr,PREVCHAIN); } return (int)instr; } return 0; } else { return 0; }; } /*B GetTerminator*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BLiveEntryRegs(struct fragfmt *frag,int *regsa,int *regsb,int *regsc,int *regsd,int windowlive ) { /********************************************************************************/ /** Obtains the set of registers which are live upon entry to the given **/ /** fragment. Regsa gives the live integer registers, regsb gives the live **/ /** float registers, regsc gives the set of live control registers and regsd **/ /** is returned as 0. Registers which do not exist on the target architecture **/ /** are returned as dead. Exempt registers are also returned as dead. If **/ /** windowlive is non-zero then the effects of window saves are ignored. **/ /********************************************************************************/ struct instrfmt *instr,*nextinstr; int nexti; if (ValidLive==NOLIVENESSINFO) { BError("B LiveEntryRegs: No valid register liveness info"); } if ((HASWINDOWREGS!=0)&&(windowlive!=0)&&(frag->prevfrag==0)&& (BMachineInstr(frag->firstinstr,NEXTCHAIN)!=NULL)) { /* get the live registers after the window save instruction - */ /* if the stack frame is large then there may be extra instructions */ /* to load the stack size into a scratch register */ instr=BMachineInstr(frag->firstinstr,NEXTCHAIN); if ((instr->privprops&SCHRESTRICTINSTR)!=0) { nextinstr=instr; do { instr=nextinstr; nexti=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); if (nexti==0) break ; nextinstr=(struct instrfmt*)(nexti); } while (!((nextinstr->privprops&SCHRESTRICTINSTR)==0)) ; } BLiveSetAfterInstr(frag,instr,regsa,regsb,regsc,regsd); *regsa&=~GRFEXEMPT; #if(ISERFREQUIRED!=0) *regsb&=~ERFEXEMPT; #if(ISERFREQUIRED==2) *regsd&=~DRFEXEMPT; #endif #if(ISCRFREQUIRED!=0) *regsc&=~CRFEXEMPT; #endif; #endif; } else { frag=(struct fragfmt*)(frag->final); *regsa=frag->rin&(~GRFEXEMPT); #if(ISERFREQUIRED!=0) *regsb=frag->fin&(~ERFEXEMPT); #if(ISERFREQUIRED==2) *regsd=frag->din&(~DRFEXEMPT); #else *regsd=0; #endif #if(ISCRFREQUIRED!=0) *regsc=frag->cin&(~CRFEXEMPT); #else *regsc=0; #endif; #else *regsb=0; *regsc=0; *regsd=0; #endif; } } /*B LiveEntryRegs*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BLiveExitRegs(struct fragfmt *frag,int *regsa,int *regsb,int *regsc,int *regsd,int windowlive) { /********************************************************************************/ /** Obtains the set of registers which are live upon entry to the given **/ /** fragment. Regsa gives the registers in the GRF, regsb the registers in the **/ /** ERF and regsc the registers in the CRF. 
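**
** As an illustrative sketch only (the caller context and the scratch
** register test are assumptions, not part of this module), an architecture
** specific module might use the exit liveness to check whether a candidate
** integer register is dead after a fragment:
**
**   int rlive,flive,clive,dlive;
**   BLiveExitRegs(frag,&rlive,&flive,&clive,&dlive,0);
**   if ((rlive&(1<<scratchreg))==0) {
**     ... scratchreg is dead on exit from frag and may be reused ...
**   }
**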
Registers which do not exist on **/ /** the processor are returned as dead. Exempt registers are also returned as **/ /** dead. If windowlive is non-zero then the effects of window restores are **/ /** ignored. **/ /********************************************************************************/ struct instrfmt *instr; if (ValidLive==NOLIVENESSINFO) { BError("B LiveExitRegs: No valid register liveness info"); } frag=(struct fragfmt*)(frag->final); /* we handle return fragments of windowed architectures specially */ if ((HASWINDOWREGS!=0)&&(windowlive!=0)&&((frag->props&RETURNFRAG)!=0)&& (BMachineInstr(frag->lastinstr,PREVCHAIN)!=NULL)) { /* get the live registers before the window restore instruction */ instr=BMachineInstr(frag->lastinstr,PREVCHAIN); if (BMachineInstr(instr->previnstr,PREVCHAIN)!=NULL) { instr=BMachineInstr(instr->previnstr,PREVCHAIN); } BLiveSetAfterInstr(frag,instr,regsa,regsb,regsc,regsd); *regsa&=~GRFEXEMPT; #if(ISERFREQUIRED!=0) *regsb&=~ERFEXEMPT; #if(ISERFREQUIRED==2) *regsd&=~DRFEXEMPT; #else *regsd=0; #endif #if(ISCRFREQUIRED!=0) *regsc&=~CRFEXEMPT; #endif; #endif; } else { *regsa=frag->rout&(~GRFEXEMPT); #if(ISERFREQUIRED!=0) *regsb=frag->fout&(~ERFEXEMPT); #if(ISERFREQUIRED==2) *regsd=frag->dout&(~DRFEXEMPT); #else *regsd=0; #endif #if(ISCRFREQUIRED!=0) *regsc=frag->cout&(~CRFEXEMPT); #else *regsc=0; #endif; #else *regsb=0; *regsc=0; *regsd=0; #endif; } } /*B LiveExitRegs*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BMemoryOverlap(int fraga,int fragb,struct instrfmt *instra,struct instrfmt *instrb,int areaa,int areab ,int constbase) { /******************************************************************************/ /** Returns a value indicating whether the two given memory accessing **/ /** instructions overlap or not. A return value of <0 indicates that the **/ /** accesses do not overlap, 0 indicates that they might and >0 indicates **/ /** that they definitely do. The area parameters, areaa and areab, may be **/ /** set if the area to which the accesses are made is known. A value of -1 **/ /** indicates that the area is unknown. Fraga and fragb are pointers to the **/ /** fragments to which the instructions belong. These may be set to 0 but a **/ /** less complete analysis will be performed. If constbase is non-zero then **/ /** it is assumed that any base registers have the same values in both **/ /** accesses. 
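**
** A minimal usage sketch (the dependence test context and the variable
** names below are illustrative assumptions, not part of this module):
**
**   int rel;
**   rel=BMemoryOverlap(fraga,fragb,loadinstr,storeinstr,-1,-1,0);
**   if (rel<0) {
**     ... the accesses are provably disjoint - no memory dependence ...
**   } else {
**     ... rel==0 means they may overlap, rel>0 means they definitely do,
**         so the original access order must be preserved ...
**   }
**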
**/ /******************************************************************************/ struct instrfmt *testinstr; int test,combprops,noalias,unroll,stores,previ,endinstr,found; /* if either access is volatile then consider that they overlap so */ /* that the original access order is maintained */ if (((instra->privprops&VOLATILELOC)!=0)||((instrb->privprops&VOLATILELOC)!=0)) return MOMAYBE; /* call the architecture specific module to try and determine */ /* whether the accesses overlap */ test=ARCHMemoryOverlap(instra,instrb,constbase); /* try and obtain more information if both accesses are direct */ if ((test==MOMAYBE)&&((((instra->privprops|instrb->privprops)&MULTIPLYINDIRECT))==0)) { /* improve the area information for the instructions if possible */ if (areaa==-1) areaa=instra->area; if (areab==-1) areab=instrb->area; /* if we have an unknown access and one which is known to */ /* access a read only area and one of the accesses is a */ /* store then they cannot overlap */ if (((((instra->props|instrb->props)&STOREINSTR))!=0)&&((areaa==-1)||(areab==-1))) { if ((areaa>=0)&&(areaa<=MAXCOMPILERAREA)&&(FixedAreaROStat [areaa]!=0)) return MONO; if ((areab>=0)&&(areab<=MAXCOMPILERAREA)&&(FixedAreaROStat [areab]!=0)) return MONO; } /* if either access is in some sort of non-aliased context then */ /* try and determine if they are in the same one */ if ((fraga==0)||(fragb==0)||(fraga==fragb)) { combprops=instra->privprops&instrb->privprops; if ((combprops&(NOINTERFERENCE|ADJACENTLOOPS))!=0) { /* firstly, assume that instruction b comes after instruction a */ noalias=instrb->privprops&NOINTERFERENCE; unroll=instrb->privprops&ADJACENTLOOPS; stores=0; previ=(int)BMachineInstr(instrb->previnstr,PREVCHAIN); endinstr=(int)BMachineInstr(instra->previnstr,PREVCHAIN); found=0; while ((previ!=0)&&(previ!=endinstr)) { if (previ==(int)instra) found=1; testinstr=(struct instrfmt*)(previ); if ((testinstr->props&STOREINSTR)!=0) { stores++; if (stores>1) unroll=0; } noalias&=testinstr->privprops; unroll&=testinstr->privprops; previ=(int)BMachineInstr(testinstr->previnstr,PREVCHAIN); } if (found!=0) { if (unroll!=0) return MONO; if ((noalias!=0)&&(instra->CXTnum==instrb->CXTnum)) { return MONO; } } /* secondly, assume that instruction a comes after instruction b */ noalias=instra->privprops&NOINTERFERENCE; unroll=instra->privprops&ADJACENTLOOPS; stores=0; previ=(int)BMachineInstr(instra->previnstr,PREVCHAIN); endinstr=(int)BMachineInstr(instrb->previnstr,PREVCHAIN); found=0; while ((previ!=0)&&(previ!=endinstr)) { if (previ==(int)instrb) found=1; testinstr=(struct instrfmt*)(previ); if ((testinstr->props&STOREINSTR)!=0) { stores++; if (stores>1) unroll=0; } noalias&=testinstr->privprops; unroll&=testinstr->privprops; previ=(int)BMachineInstr(testinstr->previnstr,PREVCHAIN); } if (found!=0) { if (unroll!=0) return MONO; if ((noalias!=0)&&(instra->CXTnum==instrb->CXTnum)) { return MONO; } } } } /* do further analysis depending on which areas are known */ if ((areaa!=-1)&&(areab!=-1)) { /* if the accesses are definitely to different areas then there */ /* can be no overlap */ if (areaa!=areab) return MONO; } } return test; } /*B MemoryOverlap*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BNextEdge(struct preddescfmt *edge) { /********************************************************************************/ /** Returns a pointer to the next edge descriptor in a predecessor edge list **/ /** or 0 if the given edge was the last in the list. 
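**
** As a usage sketch (the variable names are illustrative assumptions), the
** non-switch predecessor edges of a final fragment can be walked with
** B GetEdgeList, B GetEdgeInfo and B NextEdge:
**
**   int e,predfrag,isforward;
**   e=BGetEdgeList(frag);
**   while (e!=0) {
**     BGetEdgeInfo((struct preddescfmt*)(e),&predfrag,&isforward);
**     ... predfrag is the fragment the edge emanates from, isforward its
**         direction in the control flow graph ...
**     e=BNextEdge((struct preddescfmt*)(e));
**   }
**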
This skips over edges **/ /** associated with dense switch tables. **/ /********************************************************************************/ int curedge; curedge=edge->next; while (curedge!=0) { edge=(struct preddescfmt*)(curedge); if (edge->isswitchedge==0) return curedge; curedge=edge->next; } return 0; } /*B NextEdge*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BNextFrag(struct fragfmt *frag) { /********************************************************************************/ /** Returns the next main fragment after the given one or 0 if the given **/ /** fragment was the last in the chain **/ /********************************************************************************/ struct fragfmt *tempfrag; if (frag->nextfrag!=0) { tempfrag=frag->nextfrag; tempfrag=(struct fragfmt*)(tempfrag->final); while (((tempfrag->props&FALLENINTO)==0)&&((tempfrag->props&FALLSTHROUGH)==0)&&(tempfrag->preds==0) &&(BMachineInstr(tempfrag->firstinstr,NEXTCHAIN)==NULL)) { if (tempfrag->nextfrag==NULL) return 0; tempfrag=tempfrag->nextfrag; tempfrag=(struct fragfmt*)(tempfrag->final); } return (int)tempfrag; } else { return 0; } } /*B NextFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BNextInstr(struct instrfmt *instr) { /********************************************************************************/ /** Returns the next executable instruction after the given one in the same **/ /** fragment or 0 if the given instruction was the last in the chain **/ /********************************************************************************/ struct instrfmt *newinstr; int nextinstr; nextinstr=(int)instr->nextinstr; while (nextinstr!=0) { newinstr=(struct instrfmt*)(nextinstr); if (newinstr->groupnextinstr; } return 0; } /*B NextInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct fragfmt*BPrevFrag(struct fragfmt *frag) { /********************************************************************************/ /** Returns the previous main fragment before the given one or 0 if the given **/ /** fragment was the first in the chain **/ /********************************************************************************/ struct fragfmt *tempfrag,*prevfrag,*lastfrag; tempfrag=frag; do { prevfrag=tempfrag->prevfrag; if (prevfrag==NULL) return NULL; tempfrag=prevfrag; } while (!(tempfrag->final!=frag)) ; while (((tempfrag->props&FALLENINTO)==0)&&((tempfrag->props&FALLSTHROUGH)==0)&& (tempfrag->preds==0)&&(BMachineInstr(tempfrag->firstinstr,NEXTCHAIN)==NULL)) { lastfrag=tempfrag; do { prevfrag=tempfrag->prevfrag; if (prevfrag==NULL) return NULL; tempfrag=prevfrag; } while (!(tempfrag->final!=lastfrag)) ; } return tempfrag; } /*B PrevFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ int BPrevInstr(struct instrfmt *instr) { /********************************************************************************/ /** Returns the previous executable instruction before the given one in the **/ /** same fragment or 0 if the given instruction was the first in the chain **/ /********************************************************************************/ struct instrfmt *newinstr; int nextinstr; nextinstr=(int)instr->previnstr; while (nextinstr!=0) { newinstr=(struct instrfmt*)(nextinstr); if (newinstr->groupprevinstr; } return 0; } /*B PrevInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BSetArchInfo(struct fragfmt *frag,int value) { /********************************************************************************/ /** Sets the architecture specific information word for the given fragment **/ /********************************************************************************/ frag->archinfo=value; } /*B SetArchInfo*/ /***/ void BSetArchVariant(int variant) { /********************************************************************************/ /** Sets the architecture variant for which backend optimisation should be **/ /** performed - this also corresponds to the GIS model number **/ /********************************************************************************/ ArchVariant=variant; } /*B SetArchVariant*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BSetGISOptions(int BuffZone,int FloatOpt,int UnsafeOpt,int Trace,int Inhib,int RangeRegion,int AttemptNum ,int PassNum) { /********************************************************************************/ /** Sets the options for the global instruction scheduler. The parameters are **/ /** as follows: **/ /** BuffZone - The number of bytes of buffer zone for speculative loads at **/ /** the start and end of the address space. **/ /** FloatOpt - Non-zero if certain floating point operations may be **/ /** performed speculatively on the basis that floating point **/ /** traps will be disabled. **/ /** UnsafeOpt - The unsafe optimisation level: **/ /** 0 - No unsafe optimisations are performed (other than **/ /** those allowed by setting BuffZone and FloatOpt). **/ /** 1 - Data speculative potentially excepting instructions **/ /** may be issued but only if the data is speculative due **/ /** to possible call side effects. **/ /** 2 - Control and data speculative execution of potentially **/ /** excepting instructions is allowed but with many **/ /** heuristics to try and prevent invalid speculation. **/ /** 3 - Control and data speculative execution of potentially **/ /** excepting instructions is allowed but with minimal **/ /** heuristics to prevent invalid speculation. **/ /** Trace - Bit vector for GIS tracing. **/ /** Inhib - Bit vector for GIS optimisation inhibition. **/ /** RangeRegion - Region for which tracing or optimisation inhibition is **/ /** selected or 0 is no region is selected. **/ /** AttemptNum - Schedule attempt number for which tracing or optimisation **/ /** inhibition is selected or 0 if no attempt is selected. **/ /** PassNum - Schedule pass number for which tracing or optimisation **/ /** inhibition is selected or 0 if no pass is selected. 
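**
** For illustration only (the values in this call are hypothetical and not
** recommended defaults): a 4096 byte buffer zone, speculative floating
** point allowed, no unsafe optimisations and no tracing, inhibition,
** region, attempt or pass selection would be requested with
**
**   BSetGISOptions(4096,1,0,0,0,0,0,0);
**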
**/ /********************************************************************************/ GISBuffZone=BuffZone; GISFloatOpt=FloatOpt; GISUnsafeOpt=UnsafeOpt; GISTrace=Trace; GISInhib=Inhib; GISRegion=RangeRegion; GISAttempt=AttemptNum; GISPass=PassNum; } /*B SetGISOptions*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BIsAnnotated() { /********************************************************************************/ /** Returns a non-zero value if global scheduling annotation should be printed **/ /** on generated assembly listings. **/ /********************************************************************************/ return EntryTrace; } /*B IsAnnotated*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BGetTraceProc() { /********************************************************************************/ /** Returns the number of the procedure for which tracing should be produced **/ /** or 0 if tracing should be produced for all procedures **/ /********************************************************************************/ return RangeProc; } /*B GetTraceProc*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BTraceFrag(int printfrag) { /********************************************************************************/ /** Prints tracing of the fragment address and number for the given fragment. **/ /********************************************************************************/ struct fragfmt *frag,*curfrag; int fragnum,nextfrag; ARCHFlushOutput(1); if (printfrag!=0) { frag=(struct fragfmt*)(printfrag); fragnum=1; nextfrag=(int)PI->procfrag; while ((nextfrag!=0)&&(nextfrag!=(int)frag)) { curfrag=(struct fragfmt*)(nextfrag); nextfrag=(int)curfrag->nextfrag; fragnum++; } printf("FRAGMENT:"); if (nextfrag==0) { printf("%8x ",(int)frag); } else { printf("%d [%8x]",fragnum,(int)frag); } printf("\n"); } else { printf("NO_FRAGMENT\n"); } ARCHFlushOutput(0); } /*B TraceFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BTraceInstr(int printinstr) { /********************************************************************************/ /** Prints tracing of disassmebly for the given instruction. **/ /********************************************************************************/ struct instrfmt *instr; int ca; ARCHFlushOutput(1); if (printinstr!=0) { instr=(struct instrfmt*)(printinstr); ca=-1; ARCHDisasInstr(instr,&ca); } else { printf("NO_INSTRUCTION\n"); } ARCHFlushOutput(0); } /*B TraceInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BTraceLabel(int label) { /********************************************************************************/ /** Prints tracing for the given label **/ /********************************************************************************/ struct lrecfmt *lrec; ARCHFlushOutput(1); if (label==0) { printf("NO_LABEL"); } else { lrec=(struct lrecfmt*)(label); printf("LABEL:%8x L%d FRAGADDR %x",label,lrec->labnum,(unsigned)lrec->frag); } printf("\n"); ARCHFlushOutput(0); } /*B TraceLabel*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BAskForRegionLabel() { /********************************************************************************/ /** Returns a unique region label number. 
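**
** A sketch of the intended use (the scheduler side variable names are
** illustrative assumptions): the global scheduler labels a region and then
** asks whether that region should be traced or have optimisations
** inhibited for the current attempt and pass:
**
**   int region,tracebits,inhibbits;
**   region=BAskForRegionLabel();
**   tracebits=BAskTrace(region,attemptnum,passnum);
**   inhibbits=BAskInhibit(region,attemptnum,passnum);
**   if ((inhibbits&1)!=0) {
**     ... bit 0 set - global scheduling of this region is disabled ...
**   }
**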
This is unique for the entire **/ /** compilation unit. The label is used for tracing selection purposes. **/ /********************************************************************************/ RegionLabel++; return RegionLabel; } /*B AskForRegionLabel*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BAskTrace(int regionlabel,int attemptnum,int passnum) { /********************************************************************************/ /** Obtains a bit vector giving the set of things that the global scheduler should **/ /** trace in the given region. The use of the various bits is defined by the **/ /** global scheduler itself. If the region label is 0 then the global tracing **/ /** bits are returned. Attemptnum is used to specify for which attempt number **/ /** tracing should be generated or 0 if tracing should be generated for all **/ /** attempts. Passnum is used to specify for which pass number tracing **/ /** should be generated or 0 if tracing should be generated for all **/ /** passes. **/ /********************************************************************************/ if (GISTrace==0) { return 0; } else if ((RangeProc!=0)&&(RangeProc!=PI->procnum)) { return 0; } else if (regionlabel!=0) { if (GISRegion==0) { return GISTrace; } else if (regionlabel==GISRegion) { if ((GISPass!=0)&&(passnum!=0)&&(passnum!=GISPass)) { return 0; } else if ((GISAttempt!=0)&&(attemptnum!=0)&&(GISAttempt!=attemptnum)) { return 0; }; return GISTrace; } else { return 0; }; }; return GISTrace; } /*B AskTrace*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ int BAskInhibit(int regionlabel,int attemptnum,int passnum) { /********************************************************************************/ /** Obtains a bit vector giving the set of optimisations which should be **/ /** inhibited in the given region. The use of the various bits is defined by **/ /** the global scheduler itself. If the region label is 0 then the global **/ /** inhibition bits are returned. Attemptnum is used to specify for which **/ /** attempt number inhibition is specified or 0 if all attempts for the given **/ /** region should be similarly inhibited. Passnum is used to specify for **/ /** which pass number inhibition is specified or 0 if all passes for the **/ /** given region should be similarly inhibited. If bit 0 of the returned **/ /** vector is set then global scheduling should be disabled. **/ /********************************************************************************/ if ((GISInhib==0)&&((InhibVect&(1<<16))==0)) { return 0; } else if ((RangeProc!=0)&&(RangeProc!=PI->procnum)) { return 0; } else if (regionlabel!=0) { if (GISRegion==0) { return GISInhib; } else if ((RangeProc!=0)&&(RangeProc==PI->procnum)&&((InhibVect&(1<<16))!=0)&&(regionlabel>=GISRegion )) { return 1; } else if (regionlabel==GISRegion) { if ((GISPass!=0)&&(passnum!=0)&&(passnum!=GISPass)) { return 0; } else if ((GISAttempt!=0)&&(attemptnum!=0)&&(attemptnum!=GISAttempt)) { return 0; }; return GISInhib; } else { return 0; }; }; return GISInhib; } /*B AskInhibit*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BAugmentLiveness(struct fragfmt *frag,int regsa,int regsb,int regsc,int regsd) { /********************************************************************************/ /** Adds the given live registers to those which are live on exit from the **/ /** given fragment. 
Regsa to regsd give the new live registers. **/ /********************************************************************************/ frag->rout=frag->rout|regsa; #if(ISERFREQUIRED!=0) frag->fout=frag->fout|regsb; #if(ISERFREQUIRED==2) frag->dout=frag->dout|regsd; #endif #if(ISCRFREQUIRED!=0) frag->cout=frag->cout|regsc; #endif; #endif; } /*B AugmentLiveness*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BSetEntryLabel(struct fragfmt *frag,int newlabel) { /********************************************************************************/ /** Sets the label associated with entry to the given fragment or marks the **/ /** fragment as having no entry label if newlabel is 0. Labels associated **/ /** with the fragment which are addressed or are the destinations of dense **/ /** switch table entries are preserved. **/ /********************************************************************************/ struct fragfmt *curfrag,*newfrag,*tempfrag; struct lrecfmt *lrec; int newlabelset,prevfrag; /* look through all the label fragments associated with the given */ /* fragment and delete all those which are not associated with */ /* addressed labels */ ValidPreds=0; newlabelset=0; curfrag=frag; while (curfrag->final==frag) { prevfrag=(int)curfrag->prevfrag; if ((((curfrag->labindex==0)||(ARCHIsAddrLabel(curfrag->labindex)==0))&&(((curfrag->props&RETURNFRAG )==0)||((curfrag->props&NODELETEFRAG)==0)||(newlabel!=0))&&((curfrag->props&PRIVATELABFRAG)==0)&&((curfrag ->props&DSWITCHCASEFRAG)==0))||((curfrag->labindex!=0)&&((int)BLabRecAd(curfrag->labindex)==newlabel))) { if (newlabelset==0) { /* the new main label has not been set so use the fragment */ if (newlabel!=0) { lrec=(struct lrecfmt*)(newlabel); lrec->frag=NULL; curfrag->labindex=BLocateLabel(lrec->labnum,1); lrec->frag=curfrag; } else { curfrag->labindex=0; } newlabelset=1; } else { /* delete the current fragment as it is now redundant */ if (curfrag->prevfrag!=0) { tempfrag=(struct fragfmt*)(curfrag->prevfrag); tempfrag->nextfrag=curfrag->nextfrag; } if (curfrag->nextfrag!=NULL) { tempfrag=curfrag->nextfrag; tempfrag->prevfrag=curfrag->prevfrag; } curfrag->prevfrag=(struct fragfmt *)FREEFRAGMARKER; curfrag->nextfrag=(struct fragfmt *)freefrag [PI->proclevel]; freefrag [PI->proclevel]=(int)curfrag; } } if (newlabel==0) newlabelset=1; if (prevfrag==0) break ; curfrag=(struct fragfmt*)(prevfrag); } /* create a new fragment if there were none to hold the main label */ if ((newlabelset==0)&&(newlabel!=0)) { newfrag=BObtainFrag(); newfrag->prevfrag=frag->prevfrag; newfrag->nextfrag=frag; frag->prevfrag=newfrag; if (newfrag->prevfrag==0) { BError("B SetEntryLabel: Cannot insert new label at start of chain"); } else { tempfrag=(struct fragfmt*)(newfrag->prevfrag); tempfrag->nextfrag=newfrag; } newfrag->final=frag; lrec=(struct lrecfmt*)(newlabel); lrec->frag=NULL; newfrag->labindex=BLocateLabel(lrec->labnum,1); lrec->frag=newfrag; } } /*B SetEntryLabel*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void SetVectors(int FragNum) { /********************************************************************************/ /** Sets the current inhibit and tracing vectors depending on the current **/ /** procedure number and the given fragment number. If the given fragment **/ /** number is 0 then the inhibition and tracing flags are set to the required **/ /** global value for the current procedure. 
**/ /********************************************************************************/ CurInhibVect=0; TraceEnab=0; if ((RangeProc==0)||((FragNum==0)&&(RangeProc==PI->procnum))) { /* tracing is enabled if fragment 0 is passed in or the selection */ /* is not confined to a particular procedure */ CurInhibVect=InhibVect; TraceEnab=1; } else if ((PI->procnum==RangeProc)&&(RangeFragHigh>=RangeFragLow)) { /* to enable the inhibition the fragment range must be global */ /* or we must be in the correct range of fragments */ if ((RangeFragLow==0)||((RangeFragLow<=FragNum)&&(FragNum<=RangeFragHigh))) { CurInhibVect=InhibVect; TraceEnab=1; } }; } /*SetVectors*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void SetupUseAndDef(struct fragfmt *frag) { /********************************************************************************/ /** Sets up the reference and definition fields for the given fragment. This **/ /** is required for the live register analysis. **/ /********************************************************************************/ struct instrfmt *instr,*previnstr,*tempinstr; int userset,usefset,usecset,defrset,deffset,defcset,nextinstr; #if (ISERFREQUIRED==2) int usedset,defdset; #endif if ((frag->props&USERASSEMFRAG)!=0) { /* if we have a user assembly fragment then we consider that it */ /* references and defines all registers - this is a worst case */ /* assumption as we are unable to obtain more exacting information */ frag->ruse=-1; frag->rdef=-1; #if(ISERFREQUIRED!=0) frag->fuse=-1; frag->fdef=-1; #if(ISERFREQUIRED==2) frag->duse=-1; frag->ddef=-1; #endif #if(ISCRFREQUIRED!=0) frag->cuse=-1; frag->cdef=-1; #endif; #endif; } else { /* we calculate the use and definition sets for the registers in the */ /* fragment */ userset=0; usefset=0; usecset=0; defrset=0; deffset=0; defcset=0; #if (ISERFREQUIRED==2) usedset=0; defdset=0; #endif previnstr=NULL; nextinstr=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nextinstr!=0) { instr=(struct instrfmt*)(nextinstr); if (((instr->props&DELAYSLOTINSTR)!=0)&& (BMachineInstr(instr->previnstr,PREVCHAIN)!=NULL)&&(frag->labref !=0)&&(frag->labrefinstr==previnstr)&&((previnstr->props&ANNULLED)!=0)) { /* If the instruction is an annulled delay slot instruction then */ /* it may or may not be executed depending on whether the branch */ /* is taken or not. We assume the worst case by considering that */ /* the instruction references all its own definitions. Thus the */ /* registers potentially overwritten by this instruction are */ /* not considered to be dead. 
*/ userset|=(instr->rref|instr->rdef)&(~defrset); defrset|=instr->rdef; #if(ISERFREQUIRED!=0) usefset|=(instr->fref|instr->fdef)&(~deffset); deffset|=instr->fdef; #if(ISCRFREQUIRED!=0) usecset|=(instr->cref|instr->cdef)&(~defcset); defcset|=instr->cdef; #endif; #endif; } else if (((instr->props&ANYCALL)!=0)&&((instr->props&DELAYSLOTEXISTS)!=0)&&((instr->props&DELAYSLOTUNUSED )==0)&&(BMachineInstr(instr->nextinstr,NEXTCHAIN)!=NULL)) { /* We have a call instruction with a filled delay slot - we */ /* consider registers defined by the delay slot owner to not */ /* be defined until after the execution of the delay slot */ /* instruction */ tempinstr=instr; instr=BMachineInstr(instr->nextinstr,NEXTCHAIN); userset|=(tempinstr->rref|instr->rref)&(~defrset); defrset=(defrset|tempinstr->rdef)|instr->rdef; #if(ISERFREQUIRED!=0) usefset|=(tempinstr->fref|instr->fref)&(~deffset); deffset=(deffset|tempinstr->fdef)|instr->fdef; #if (ISERFREQUIRED==2) usedset|=(tempinstr->dref|instr->dref)&(~defdset); defdset=(defdset|tempinstr->ddef)|instr->ddef; #endif #if(ISCRFREQUIRED!=0) usecset|=(tempinstr->cref|instr->cref)&(~defcset); defcset=(defcset|tempinstr->cdef)|instr->cdef; #endif; #endif; } else { /* We update the use set by all registers which are referenced */ /* but have not been defined since the start of the fragment. */ /* These registers are live. */ userset|=instr->rref&(~defrset); defrset|=instr->rdef; #if(ISERFREQUIRED!=0) usefset|=instr->fref&(~deffset); deffset|=instr->fdef; #if (ISERFREQUIRED==2) usedset|=instr->dref&(~defdset); defdset|=instr->ddef; #endif #if(ISCRFREQUIRED!=0) usecset|=instr->cref&(~defcset); defcset|=instr->cdef; #endif; #endif; }; previnstr=instr; nextinstr=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); } /* store the use and definition sets in the fragment record */ frag->ruse=userset; frag->rdef=defrset; #if(ISERFREQUIRED!=0) frag->fuse=usefset; frag->fdef=deffset; #if (ISERFREQUIRED==2) frag->duse=usedset; frag->ddef=defdset; #endif #if(ISCRFREQUIRED!=0) frag->cuse=usecset; frag->cdef=defcset; #endif; #endif; } } /*SetupUseAndDef*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BReconvergeLiveness() { /********************************************************************************/ /** Reconverges the live register information for the current procedure from **/ /** scratch. If the predecessor edge information is invalid then this is also **/ /** recreated as it is required for live register analysis. 
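**
** The analysis converges on the usual backward liveness equations, shown
** here for the integer register file only as a summary of what the
** iteration computes (the other register files follow the same pattern):
**
**   frag->rin  = frag->ruse | (frag->rout & ~frag->rdef)
**   frag->rout = union of rin over the control flow successors of frag
**
** DoLiveRegAnalysis is reapplied until no in set changes.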
**/ /********************************************************************************/ struct fragfmt *frag; int nextf; /** check that the fragments have been reorganised */ if (BeenReorg==0) { BError("Cannot do liveness reconvergence"); } /** go through the fragment chain generating a predecessor list for */ /** each final fragment if the information is no longer valid */ if (ValidPreds==0) { if ((InhibVect&16)==0) { nextf=(int)PI->procfrag; while (nextf!=0) { frag=(struct fragfmt*)(nextf); frag=(struct fragfmt*)(frag->final); frag->preds=0; nextf=(int)frag->nextfrag; } WalkFinalFrags(GenPredecessors); } ValidPreds=1; } /** reconverge the liveness information */ if ((InhibVect&8)==0) { /** initialise the fragment ref and def fields for live reg analysis */ WalkFinalFrags(InitLiveRegAnalysis); /** do live register analysis iteratively until there are no changes to */ /** the in sets of any of the fragments in the procedure */ do { changes=0; WalkFinalFrags(DoLiveRegAnalysis); } while (!(changes==0)) ; ValidLive=RWLIVENESSINFO; } } /*B ReconvergeLiveness*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BUpdateLiveRegs(struct fragfmt *frag) { /********************************************************************************/ /** Updates the live registers for the current procedure after a change has **/ /** been made to the given fragment. We attempt to do the minimum amount of **/ /** work to reconverge the live register analysis equation. We recalculate the **/ /** use and definition sets for the fragment and, from those, calculate the **/ /** new in set. If the in set is the same as before then no further work needs **/ /** to be done as the change will have no impact on any other fragments. If **/ /** the new in set is a superset of the old one then we go through all the **/ /** fragments applying the live register analysis algorithm until it converges **/ /** again. If, however, some members of the old in set do not appear in the **/ /** new in set then we remove these registers from the in and out sets of all **/ /** fragments and apply the analysis again. 
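**
** In outline (a summary of the cases described above, not additional
** behaviour):
**
**   new in set equal to the old one      - nothing more to do
**   new in set a superset of the old one - rerun the analysis to a fixpoint
**   registers dropped from the in set    - clear them from every fragment's
**                                          in and out sets, then rerun
**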
**/ /********************************************************************************/ struct fragfmt *curfrag; int newrin,newfin,newcin,remrset,remfset,remcset,remdset,nextfrag; #if(ISERFREQUIRED==2) int newdin; #endif if (ValidLive!=RWLIVENESSINFO) { BError("B UpdateLiveRegs: No valid register liveness info"); } if ((InhibVect&8)!=0) return ; frag=(struct fragfmt*)(frag->final); if ((frag->props&USERASSEMFRAG)==0) { /* update the use and definition sets for the fragment */ SetupUseAndDef(frag); /* recalculate the in set for the fragment and see if it has changed */ #if(ISERFREQUIRED!=0) #if(ISCRFREQUIRED!=0) newrin=frag->ruse|(frag->rout&(~frag->rdef)); newfin=frag->fuse|(frag->fout&(~frag->fdef)); newcin=frag->cuse|(frag->cout&(~frag->cdef)); #if(ISERFREQUIRED==2) newdin=frag->duse|(frag->dout&(~frag->ddef)); if ((frag->rin==newrin)&&(frag->fin==newfin)&&(frag->cin==newcin)&& (frag->din==newdin)) return; #else if ((frag->rin==newrin)&&(frag->fin==newfin)&&(frag->cin==newcin)) return ; #endif remrset=frag->rin&(~newrin); remfset=frag->fin&(~newfin); remcset=frag->cin&(~newcin); frag->rin=newrin; frag->fin=newfin; frag->cin=newcin; #if(ISERFREQUIRED==2) remdset=frag->din&(~newdin); frag->din=newdin; #else remdset=0; #endif #else newrin=frag->ruse|(frag->rout&(~frag->rdef)); newfin=frag->fuse|(frag->fout&(~frag->fdef)); #if(ISERFREQUIRED==2) newdin=frag->duse|(frag->dout&(~frag->ddef)); if ((frag->rin==newrin)&&(frag->fin==newfin)&&(frag->din==newdin)) return; #else if ((frag->rin==newrin)&&(frag->fin==newfin)) return ; #endif remrset=frag->rin&(~newrin); remfset=frag->fin&(~newfin); remcset=0; frag->rin=newrin; frag->fin=newfin; #if(ISERFREQUIRED==2) remdset=frag->din&(~newdin); frag->din=newdin; #else remdset=0; #endif #endif; #else newrin=frag->ruse|(frag->rout&(~frag->rdef)); if (frag->rin==newrin) return ; remrset=frag->rin&(~newrin); remfset=0; remcset=0; remdset=0; frag->rin=newrin; #endif; /* if the new in set is not a superset of the old one then we have */ /* to remove the missing members from all the in and out sets in the */ /* procedure before trying to reconverge the live register analysis */ if (((remrset|remfset)|remcset|remdset)!=0) { nextfrag=(int)PI->procfrag; while (nextfrag!=0) { curfrag=(struct fragfmt*)(nextfrag); /* subtract the removal sets from the in and out sets */ if ((curfrag->final==curfrag)&&((curfrag->props&USERASSEMFRAG)==0)) { curfrag->rout=curfrag->rout&(~remrset); #if(ISERFREQUIRED!=0) curfrag->fout=curfrag->fout&(~remfset); #if(ISERFREQUIRED==2) curfrag->dout=curfrag->dout&(~remdset); #endif #if(ISCRFREQUIRED!=0) curfrag->cout=curfrag->cout&(~remcset); #endif; #endif; if ((int)frag!=(int)curfrag) { curfrag->rin=curfrag->rin&(~remrset); #if(ISERFREQUIRED!=0) curfrag->fin=curfrag->fin&(~remfset); #if(ISERFREQUIRED==2) curfrag->din=curfrag->din&(~remdset); #endif #if(ISCRFREQUIRED!=0) curfrag->cin=curfrag->cin&(~remcset); #endif; #endif; } } nextfrag=(int)curfrag->nextfrag; } } /* do live register analysis iteratively until there are no changes to */ /* the in sets of any of the fragments in the procedure */ do { changes=0; WalkFinalFrags(DoLiveRegAnalysis); } while (!(changes==0)) ; } } /*B UpdateLiveRegs*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ void BLiveSetAfterInstr(struct fragfmt *frag,struct instrfmt *instr,int *rlive,int *flive,int *clive ,int *dlive) { /********************************************************************************/ /** Determines the set of registers which are live after execution of the **/ /** given instruction in the given fragment. Sets rlive to be the set of live **/ /** integer registers, flive to be the set of live float registers and clive **/ /** to be the set of live control registers. The dlive parameter is set to 0. **/ /********************************************************************************/ struct instrfmt *previnstr; int userset,usefset,usecset,usedset,defrset,deffset,defcset,defdset,nextinstr,inannulslot; if (ValidLive==NOLIVENESSINFO) { BError("B LiveSetAfterInstr: No valid register liveness info"); } frag=(struct fragfmt*)(frag->final); if (((frag->props&USERASSEMFRAG)!=0)||((InhibVect&8)!=0)) { /* if we are in a user assembly fragment then we consider all */ /* registers to be live since we cannot obtain more exact information */ *rlive=-1; *flive=0; *clive=0; *dlive=0; #if(ISERFREQUIRED!=0) *flive=-1; #if(ISERFREQUIRED==2) *dlive=-1; #endif #if(ISCRFREQUIRED!=0) *clive=-1; #endif; #endif; } else { /* we look at the remainder of instructions in the fragment and see */ /* which registers are referenced and defined */ userset=0; usefset=0; usecset=0; usedset=0; defrset=0; deffset=0; defcset=0; defdset=0; nextinstr=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); while (nextinstr!=0) { instr=(struct instrfmt*)(nextinstr); /* determine if the instruction is in a potentially annulled */ /* delay slot */ inannulslot=0; if (((instr->props&DELAYSLOTINSTR)!=0)&&(BMachineInstr(instr->previnstr,PREVCHAIN)!=NULL)) { previnstr=BMachineInstr(instr->previnstr,PREVCHAIN); if (((previnstr->props&ANNULLED)!=0)&&(frag->labref!=0)&&(frag->labrefinstr==previnstr)) { inannulslot=1; } } if (inannulslot!=0) { /* If the instruction is an annulled delay slot instruction then */ /* it may or may not be executed depending on whether the branch */ /* is taken or not. We assume the worst case by considering that */ /* the instruction references all its own definitions. Thus the */ /* registers potentially overwritten by this instruction are */ /* not considered to be dead. */ userset|=(instr->rref|instr->rdef)&(~defrset); defrset|=instr->rdef; #if(ISERFREQUIRED!=0) usefset|=(instr->fref|instr->fdef)&(~deffset); deffset|=instr->fdef; #if(ISERFREQUIRED==2) usedset|=(instr->dref|instr->ddef)&(~defdset); defdset|=instr->ddef; #endif #if(ISCRFREQUIRED!=0) usecset|=(instr->cref|instr->cdef)&(~defcset); defcset|=instr->cdef; #endif; #endif; } else { /* We update the use set by all registers which are referenced */ /* but have not been defined since the start position in the */ /* fragment. These registers are live. 
*/ userset|=instr->rref&(~defrset); defrset|=instr->rdef; #if(ISERFREQUIRED!=0) usefset|=instr->fref&(~deffset); deffset|=instr->fdef; #if(ISERFREQUIRED==2) usedset|=instr->dref&(~defdset); defdset|=instr->ddef; #endif #if(ISCRFREQUIRED!=0) usecset|=instr->cref&(~defcset); defcset|=instr->cdef; #endif; #endif; } nextinstr=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); } /* the full set of live registers is the union of those referenced in */ /* the remainder of the fragment and those which are live on exit which */ /* were not defined in the remainder of the fragment */ *rlive=userset|(frag->rout&(~defrset)); *flive=0; *clive=0; *dlive=0; #if(ISERFREQUIRED!=0) *flive=usefset|(frag->fout&(~deffset)); #if(ISCRFREQUIRED!=0) *clive=usecset|(frag->cout&(~defcset)); #endif; #if(ISERFREQUIRED==2) *dlive=usedset|(frag->dout&(~defdset)); #endif #endif; *dlive=0; } } /*B LiveSetAfterInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BPrintProcBanner() { /********************************************************************************/ /** Prints the entry banner for the current procedure **/ /********************************************************************************/ struct fragfmt *frag; if ((FragReport!=0)||(EntryTrace!=0)||(BlkTrace!=0)||(SchReport!=0)||(MemTrace!=0)||(GISFragReport!=0) ) { printf("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"); frag=PI->procfrag; if (frag->entryid!=0) { ARCHPrintEntryName(frag->entryid); printf(" [%d]",PI->procnum); } else { printf("PROCEDURE %2d",PI->procnum); } printf(" LEVEL=%d\n\n",PI->proclevel); } } /*B PrintProcBanner*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BReorganiseFragments() { /********************************************************************************/ /** Reorganises the fragment sequences in the current procedure to improve **/ /** processor utilisation. This routine does not alter the sequences of **/ /** machine instructions within the fragments themselves. **/ /** **/ /** Reorganisation is based upon a static use-count calculated for each **/ /** fragment. Only fragments containing real machine instructions are **/ /** considered : fragments containing only housekeeping pseudo instructions **/ /** neither have nor contribute to use counts. 
**/ /********************************************************************************/ int iterations; /** check that multiple reorganisation is not attempted */ /* %IF BeenReorg # 0 %THENSTART */ /* B Error("Cannot reorganise fragments") */ /* %FINISH */ /** initialise the fragment management fields */ WalkFrags(InitFrag); /** calculate fragment use counts */ WalkFrags(CountFrag); /** print frag structure for debugging */ if ((TraceEnab!=0)&&(FragReport!=0)) { BPrintFrags("FRAGMENT STRUCTURE BEFORE REORGANISATION"); } /** do whatever reorganisations are possible */ for (iterations=1; iterations<=MAXITERATIONS; iterations++) { changes=0; WalkFrags(ReorgFrag); if ((TraceEnab!=0)&&(FragReport!=0)) { printf("\n\nITERATION %1d CHANGES = %d\n\n",iterations,changes); if (changes!=0) { BPrintFrags("FRAGMENT STRUCTURE BEFORE ITERATION"); } } if (changes==0) break ; } /** go through the fragment chain generating a predecessor list for */ /** each final fragment */ if ((InhibVect&16)==0) { WalkFinalFrags(GenPredecessors); } ValidPreds=1; if ((InhibVect&8)==0) { /** initialise the fragment ref and def fields for live reg analysis */ WalkFinalFrags(InitLiveRegAnalysis); /** do live register analysis iteratively until there are no changes to */ /** the in sets of any of the fragments in the procedure */ do { changes=0; WalkFinalFrags(DoLiveRegAnalysis); } while (!(changes==0)) ; ValidLive=RWLIVENESSINFO; } /** print frag structure for debugging */ if ((TraceEnab!=0)&&(FragReport!=0)) { BPrintFrags("FRAGMENT STRUCTURE AFTER REORGANISATION"); } /** the fragments have been reorganised */ BeenReorg=1; } /*B ReorganiseFragments*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BSchBasicBlock(struct fragfmt *frag) { /********************************************************************************/ /** Performs basic block scheduling on the given fragment but does not **/ /** attempy to fill unused delay slots in the fragment **/ /********************************************************************************/ /* check that necessary information is available */ if (BeenReorg==0) { BError("Cannot do B-B scheduling"); } /* claim space to hold the schedule descriptors */ ClaimSchDescriptors((int)frag,1); /* we perform basic block scheduling on the fragment*/ if (((frag->props&USERASSEMFRAG)==0)&&((frag->props&DSWITCHJUMPFRAG)==0)&&((frag->props&NOOPTIMFRAG )==0)&&((InhibVect&1)==0)&&(BMachineInstr(frag->firstinstr,NEXTCHAIN)!=NULL)) { /* generate tracing if enabled */ if ((TraceEnab!=0)&&(SchReport!=0)) { printf("\n%s%s\nBASIC BLOCK SCHEDULER - FRAGMENT: %8x\n",bannerline,bannerline,(int)frag); } /* schedule the instructions in the fragment */ DoReschedFrag((int)frag); } /* free the space claimed to hold the schedule descriptors */ FreeSchDescriptors(); } /*B SchBasicBlock*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BBasicBlockSchProc(int prelimsch) { /********************************************************************************/ /** Schedules the instructions in the fragments of the current procedure to **/ /** improve processor utilisation. This scheduling should be done after **/ /** the fragment order itself is rearranged. The scheduling is constrained to **/ /** operate within basic blocks. If prelimsch is non-zero then only **/ /** preliminary scheduling is done and delay slots are not filled. 
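**
** A sketch of the calling order implied by the checks in this module (the
** driver sequence shown is an illustrative assumption, not taken from the
** source):
**
**   BReorganiseFragments();     (required first - scheduling and liveness
**                                reconvergence refuse to run until the
**                                fragments have been reorganised)
**   BBasicBlockSchProc(1);      (preliminary pass, delay slots left open)
**   ...                         (further passes, e.g. global scheduling)
**   BBasicBlockSchProc(0);      (full basic block scheduling pass)
**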
**/ /********************************************************************************/ /** check that necessary information is available */ if (BeenReorg==0) { BError("Cannot do B-B scheduling"); } /** print fragments for debugging purposes */ if ((TraceEnab!=0)&&(FragReport!=0)) { if (prelimsch!=0) { BPrintFrags("FRAGMENTS BEFORE PRELIM B-B SCHEDULING"); } else { BPrintFrags("FRAGMENTS BEFORE B-B SCHEDULING"); } } /** perform basic block scheduling on the procedure */ SchedProc(prelimsch); /** print fragments for debugging purposes */ if ((TraceEnab!=0)&&(FragReport!=0)) { if (prelimsch!=0) { BPrintFrags("FRAGMENTS AFTER PRELIM B-B SCHEDULING"); } else { BPrintFrags("FRAGMENTS AFTER B-B SCHEDULING"); } } } /*B BasicBlockSchProc*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ #if(BUILDWITHGS!=0) static void CountBranchRefs(struct fragfmt *frag) { /********************************************************************************/ /** Counts the number of references to each fragment in a function after it **/ /** has been scheduled. This information is used in optimisations performed **/ /** in the architecture specific module after global scheduling. This routine **/ /** also clears the predecessor edge and register liveness information from **/ /** the fragments (which will be invalid after global scheduling) and updates **/ /** the fallen into and falls through flags for each fragment. **/ /********************************************************************************/ struct fragfmt *destfrag,*nextfrag; struct lrecfmt *lrec; struct instrfmt *instr; int lasti; /* call the architecture specific module to handle any label references */ /* other than those in fragment terminating branches */ ARCHSetLabelRefs(frag); /* see if the fragment terminator instruction references some */ /* other fragment - if so increment its use count */ if ((frag->labref!=0)&&(frag->labrefinstr!=NULL)) { lrec=BLabRecAd(frag->labref); if (lrec->labnum>=PI->labadjust) { /* dest is in current proc */ destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); destfrag->uses=destfrag->uses+1; } lasti=(int)frag->labrefinstr; instr=(struct instrfmt*)(lasti); } else { lasti=(int)BMachineInstr(frag->lastinstr,PREVCHAIN); if (lasti!=0) { instr=(struct instrfmt*)(lasti); if ((instr->props&DELAYSLOTINSTR)!=0) { lasti=(int)BMachineInstr(instr->previnstr,PREVCHAIN); instr=(struct instrfmt*)(lasti); } } } /* determine if control flow from the fragment may fall through */ /* into the succeeding fragment - we tie a dense switch jump */ /* instruction to any following dense switch table fragment */ if (((lasti==0)||((((instr->props&BRANCH)==0)||((instr->props&CONDITIONALBRANCH)!=0))&&(((instr->props &JUMPINSTR)==0)||((instr->privprops&DSWITCHJUMPINSTR)!=0))))&&(frag->nextfrag!=0)&&((frag->props&RETURNFRAG )==0)) { frag->props|=FALLSTHROUGH; nextfrag=frag->nextfrag; nextfrag=(struct fragfmt*)(nextfrag->final); nextfrag->uses=nextfrag->uses+1; nextfrag->props|=FALLENINTO; } } /*CountBranchRefs*/ #endif /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BTraceProcedure() { /********************************************************************************/ /** Prints tracing for all the fragments in the current procedure starting **/ /** with the given initial fragment. 
This is used to give the fragment state **/ /** after delay slot reintroduction has been performed by the global **/ /** scheduler and also before the wrappers are changed by the global **/ /** scheduler. **/ /********************************************************************************/ ARCHFlushOutput(1); BPrintFrags("FRAGMENTS AFTER GIS PROCESSING"); ARCHFlushOutput(0); } /*B TraceProcedure*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BGlobalSchProc() { /********************************************************************************/ /** Performs global scheduling on the fragments in the current procedure to **/ /** improve processor utilisation. This scheduling should be done after the **/ /** fragment order itself is rearranged. **/ /********************************************************************************/ #if(BUILDWITHGS!=0) if (((CurInhibVect&(1<<5))==0)&&(((InhibVect&(1<<15))==0)||(RangeProc==0)||(PI->procnumprocfrag,1); /** print fragments for debugging purposes */ if ((TraceEnab!=0)&&(GISFragReport!=0)) { BPrintFrags("FRAGMENTS BEFORE GLOBAL SCHEDULING"); } /** perform global scheduling on the procedure */ if (GISTrace!=0) ARCHFlushOutput(0); GlobalSchedule(ArchVariant,(int)PI->procfrag,GISBuffZone,GISFloatOpt,GISUnsafeOpt); ValidLive=NOLIVENESSINFO; ValidPreds=0; /** print fragments for debugging purposes */ if ((TraceEnab!=0)&&(GISFragReport!=0)) { BPrintFrags("FRAGMENTS AFTER GLOBAL SCHEDULING"); } /** set the uses field in the fragments back to the correct value */ /** and update the falls through and fallen into flags */ WalkFinalFrags(CountBranchRefs); /** Try and fill any empty delay slots in the function */ FillDelaySlots((int)PI->procfrag,0); /** print fragments for debugging purposes */ if ((TraceEnab!=0)&&(FragReport!=0)) { BPrintFrags("FRAGMENTS AFTER ALL SCHEDULING"); } }; #endif; } /*B GlobalSchProc*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void WalkFrags(void FragHandler(struct fragfmt *)) { /********************************************************************************/ /** Walks over all the fragments in the current procedure and calls **/ /** 'FragHandler' for each of them. **/ /********************************************************************************/ struct fragfmt *frag; int nextfrag,fragnum; fragnum=1; nextfrag=(int)PI->procfrag; if (RangeProc==0) { CurInhibVect=InhibVect; TraceEnab=1; while (nextfrag!=0) { frag=(struct fragfmt*)(nextfrag); FragHandler(frag); nextfrag=(int)frag->nextfrag; fragnum++; } } else { while (nextfrag!=0) { SetVectors(fragnum); frag=(struct fragfmt*)(nextfrag); FragHandler(frag); nextfrag=(int)frag->nextfrag; fragnum++; } SetVectors(0); } } /*WalkFrags*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void WalkFinalFrags(void FragHandler(struct fragfmt *)) { /********************************************************************************/ /** Walks over all the final fragments in the current procedure and calls **/ /** 'FragHandler' for each of them. 
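**
** A minimal sketch of the handler pattern (the handler shown is a
** hypothetical example, not part of this module):
**
**   static void ClearArchInfo(struct fragfmt *frag) {
**     frag->archinfo=0;
**   }
**
**   ...
**
**   WalkFinalFrags(ClearArchInfo);
**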
**/ /********************************************************************************/ struct fragfmt *frag; int nextfrag,fragnum; fragnum=1; nextfrag=(int)PI->procfrag; if (RangeProc==0) { CurInhibVect=InhibVect; TraceEnab=1; while (nextfrag!=0) { frag=(struct fragfmt*)(nextfrag); frag=(struct fragfmt*)(frag->final); FragHandler(frag); nextfrag=(int)frag->nextfrag; fragnum++; } } else { while (nextfrag!=0) { SetVectors(fragnum); frag=(struct fragfmt*)(nextfrag); frag=(struct fragfmt*)(frag->final); FragHandler(frag); nextfrag=(int)frag->nextfrag; fragnum++; } SetVectors(0); } } /*WalkFinalFrags*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BPrintFrags(char * message) { /********************************************************************************/ /** Prints all the fragments in the given procedure, headed with the given **/ /** message. **/ /********************************************************************************/ if ((InhibVect!=0)||(RangeProc==0)||(RangeProc==PI->procnum)) { printf("\n%s%s\n%s\n\n",bannerline,bannerline,message); fragno=0; WalkFrags(PrintFrag); } } /*PrintFrags*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PrintFragInfo(struct fragfmt *frag) { /********************************************************************************/ /** Prints the information associated with the given fragment **/ /********************************************************************************/ struct fragfmt *curfrag; struct instrfmt *instr; struct lrecfmt *lrec; struct preddescfmt *preddesc; int nextdesc,fragnum,nextfrag,anyregs,count,bubbles,labdesc,i; int isapprox,rregs,fregs,cregs,dregs; printf("uses:%d final:%8x fragprops:0x%8x\n",frag->uses,(unsigned)frag->final,frag->props); printf("prevfrag:%8x nextfrag:%8x firstinstr:%8xlastinstr:%8x\n",(unsigned)frag->prevfrag,(unsigned)frag->nextfrag,(unsigned)frag->firstinstr,(unsigned)frag->lastinstr); if ((frag->labindex!=0)||(frag->labref!=0)) { printf("lab:%8x",frag->labindex); if (frag->labindex!=0) { labdesc=(int)BLabRecAd(frag->labindex); printf(" ldesc:%8x",labdesc); } printf(" lref:%8x lrefi:%8x",frag->labref,(unsigned)frag->labrefinstr); if (frag->labref!=0) { lrec=BLabRecAd(frag->labref); printf(" destfrag:"); nextfrag=(int)PI->procfrag; fragnum=1; while ((nextfrag!=0)&&(nextfrag!=(int)lrec->frag)) { fragnum++; curfrag=(struct fragfmt*)(nextfrag); nextfrag=(int)curfrag->nextfrag; } if (nextfrag!=0) { printf("%2d",fragnum); } else { printf("%8x",(unsigned)lrec->frag); } } printf("\n"); } /* if the fragment has been globally scheduled then estimate */ /* its execution time */ i=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); if (i!=0) { instr=(struct instrfmt*)(i); if ((instr->bubbles!=0)&&((instr->issueprops&TPGLOBSCHED)!=0)) { isapprox=0; count=0; while (i!=0) { instr=(struct instrfmt*)(i); if (((instr->issueprops&TPIGSTART)!=0)||((instr->props&DELAYSLOTINSTR)!=0)) count++; bubbles=instr->bubbles; if (bubbles!=0) { if (bubbles==BIBUBBLES) isapprox=1; if (bubbles>=BIBUBBLES) bubbles=1; count=(count+bubbles)-1; } i=(int)instr->nextinstr; } if (count!=0) { if (isapprox!=0) printf("APPROXIMATE "); printf("EXECUTION CYCLES: %d\n",count); } } } if ((ValidPreds!=0)&&(frag->preds!=0)) { printf("preds:"); nextdesc=frag->preds; while (nextdesc!=0) { preddesc=(struct preddescfmt*)(nextdesc); nextfrag=(int)PI->procfrag; fragnum=1; while ((nextfrag!=0)&&(nextfrag!=preddesc->frag)) { fragnum++; curfrag=(struct 
fragfmt*)(nextfrag); nextfrag=(int)curfrag->nextfrag; } if (nextfrag!=0) { if (preddesc->isforward==0) { printf(" B"); } else { printf(" "); } if (preddesc->isswitchedge!=0) { printf("S%d",fragnum); } else { printf("%1d",fragnum); } } else { if (preddesc->isforward==0) { printf(" B["); } else { printf(" ["); } printf("%8x]",preddesc->frag); } nextdesc=preddesc->next; } printf("\n"); } if (frag->final==frag) { anyregs=0; if ((frag->rin|frag->rout|frag->ruse|frag->rdef)!=0) anyregs=1; #if(ISERFREQUIRED!=0) if ((frag->fin|frag->fout|frag->fuse|frag->fdef)!=0) anyregs=1; #if(ISERFREQUIRED==2) if ((frag->din|frag->dout|frag->duse|frag->ddef)!=0) anyregs=1; #endif #if(ISCRFREQUIRED!=0) if ((frag->cin|frag->cout|frag->cuse|frag->cdef)!=0) anyregs=1; #endif #endif; if (anyregs!=0) { printf("in:"); rregs=frag->rin; fregs=0; cregs=0; dregs=0; #if(ISERFREQUIRED!=0) fregs=frag->fin; #if(ISCRFREQUIRED!=0) cregs=frag->cin; #endif #if(ISERFREQUIRED==2) dregs=frag->din; #endif #endif ARCHPrintRegisters(rregs,fregs,cregs,dregs); printf(" out:"); rregs=frag->rout; fregs=0; cregs=0; dregs=0; #if(ISERFREQUIRED!=0) fregs=frag->fout; #if(ISCRFREQUIRED!=0) cregs=frag->cout; #endif #if(ISERFREQUIRED==2) dregs=frag->dout; #endif #endif ARCHPrintRegisters(rregs,fregs,cregs,dregs); printf("\nuse:"); rregs=frag->ruse; fregs=0; cregs=0; dregs=0; #if(ISERFREQUIRED!=0) fregs=frag->fuse; #if(ISCRFREQUIRED!=0) cregs=frag->cuse; #endif #if(ISERFREQUIRED==2) dregs=frag->duse; #endif #endif ARCHPrintRegisters(rregs,fregs,cregs,dregs); printf(" def:"); rregs=frag->rdef; fregs=0; cregs=0; dregs=0; #if(ISERFREQUIRED!=0) fregs=frag->fdef; #if(ISCRFREQUIRED!=0) cregs=frag->cdef; #endif #if(ISERFREQUIRED==2) dregs=frag->ddef; #endif #endif ARCHPrintRegisters(rregs,fregs,cregs,dregs); printf("\n"); } } ARCHPrintArchInfo(frag->archinfo); printf("\n"); } /*PrintFragInfo*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PrintFrag(struct fragfmt *frag) { /********************************************************************************/ /** Prints a disassembly of the given fragment. **/ /********************************************************************************/ struct instrfmt *instr; struct lrecfmt *lrec; int previnstr,nextinstr,ca; fragno++; if (TraceEnab==0) return ; printf("\n%s%s\nFRAGMENT:%d [%8x] ",bannerline,bannerline,fragno,(int)frag); PrintFragInfo(frag); if (frag->entryid!=0) { ARCHPrintEntryName(frag->entryid); printf("\n\n"); } if (frag->labindex!=0) { lrec=BLabRecAd(frag->labindex); printf(" L%d:\n",lrec->labnum); } previnstr=0; nextinstr=(int)frag->firstinstr; while (nextinstr!=0) { instr=(struct instrfmt*)(nextinstr); if (instr->previnstr!=(struct instrfmt *)previnstr) { BError("Bad instruction chain"); } if (ExtInstrTrace!=0) { ARCHPrintInstr(instr); } else { ca=-2; ARCHDisasInstr(instr,&ca); } previnstr=nextinstr; nextinstr=(int)instr->nextinstr; } /* if the fragment is associated with a switch table then print */ /* its contents */ if (frag->switchtab!=0) { ARCHPrintSwitch(frag); } } /*PrintFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ struct instrfmt *BMachineInstr(struct instrfmt * Startinstr,int Chain) { /*****************************************************************************/ /** Starting with the instruction addressed by 'StartinstrAddr' follows the **/ /** appropriate chain looking for a real machine instruction. 
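A 'real' machine instruction is taken here to be one whose group value lies below MINPSEUDOGROUP, so pseudo entries such as LINENUMBER and DSWITCHNOTE records are stepped over; a typical call is BMachineInstr(frag->lastinstr,PREVCHAIN) to locate the last machine instruction of a fragment.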
Returns the **/ /** instruction's address if it finds one, else returns 0. **/ /*****************************************************************************/ struct instrfmt *nextinstr; nextinstr=Startinstr; while (nextinstr!=NULL) { if (nextinstr->group<MINPSEUDOGROUP) break ; if (Chain==PREVCHAIN) nextinstr=nextinstr->previnstr; else nextinstr=nextinstr->nextinstr; } return nextinstr; } /*B MachineInstr*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static int FindFrag(struct fragfmt *StartFrag,struct fragfmt *StopFrag,struct fragfmt *TargetFrag,int Chain) { /********************************************************************************/ /** Starting with the frag addressed by 'StartFrag' follows the **/ /** appropriate chain looking for frag 'TargetFrag'. Returns 1 if **/ /** frag is found, else returns 0. **/ /********************************************************************************/ struct fragfmt *frag; frag=StartFrag; do { if (frag==TargetFrag) return 1; if (frag==StopFrag) return 0; if (frag==NULL) break ; if (Chain==PREVCHAIN) frag=frag->prevfrag; else frag=frag->nextfrag; } while (1) /* FOR EVER */; return 0; } /*FindFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ void BMoveAfter(struct fragfmt *whichf,struct fragfmt *wheref,struct instrfmt *whichi,struct instrfmt *wherei) { /***************************************************************************/ /** 'which' and 'where' are instructions in fragments whichf and wheref **/ /** respectively. **/ /** Alter the linkages so that 'which' becomes the nextinstr to 'where'. **/ /** If whichf is 0 then the instruction to be moved is not chained into **/ /** any fragment. **/ /** If wherei is 0 then the instruction is inserted at the very start of **/ /** the destination fragment. **/ /***************************************************************************/ struct instrfmt *work; /* check we are not moving a branch out of a fragment */ if ((whichf!=wheref)&&(whichf!=NULL)) { if ((whichf->labref!=0)&&(whichf->labrefinstr==whichi)) { BError("B MoveAfter: Cannot move branch between fragments"); } } /* first unhook 'which' from its current position */ if (whichf!=NULL) { if (whichi->previnstr!=NULL) { work=whichi->previnstr; work->nextinstr=whichi->nextinstr; } else { whichf->firstinstr=whichi->nextinstr; } if (whichi->nextinstr!=NULL) { work=whichi->nextinstr; work->previnstr=whichi->previnstr; } else { whichf->lastinstr=whichi->previnstr; } if ((whichf==(struct fragfmt *)PI->curfrag)&&(PI->fragsize>0)) PI->fragsize=PI->fragsize-1; } /* now append 'which' to 'where' - test for the case where the */ /* destination fragment was empty */ if (wheref->firstinstr==NULL) { wheref->firstinstr=whichi; wheref->lastinstr=whichi; whichi->previnstr=NULL; whichi->nextinstr=NULL; } else if (wherei==NULL) { work=wheref->firstinstr; work->previnstr=whichi; whichi->previnstr=NULL; whichi->nextinstr=work; wheref->firstinstr=whichi; } else { whichi->nextinstr=wherei->nextinstr; if (wherei->nextinstr!=NULL) { work=wherei->nextinstr; work->previnstr=whichi; } else { wheref->lastinstr=whichi; } wherei->nextinstr=whichi; whichi->previnstr=wherei; }; if (wheref==(struct fragfmt *)PI->curfrag) PI->fragsize=PI->fragsize+1; } /*B MoveAfter*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.
All Rights Reserved.*/ /**/ /***/ void BMoveBefore(struct fragfmt *whichf,struct fragfmt *wheref,struct instrfmt * whichi,struct instrfmt * wherei) { /***************************************************************************/ /** 'which' and 'where' are instructions in fragments 'whichf' & 'wheref' **/ /** respectively. **/ /** Alter the linkages so that 'which' becomes the previnstr to 'where'. **/ /** If wheref is 0 then the instruction to be moved is not chained into **/ /** any fragment. **/ /** If wherei is 0 then the instruction is inserted at the very end of **/ /** the destination fragment. **/ /***************************************************************************/ struct instrfmt *work; /* check we are not moving a branch out of a fragment */ if ((wheref!=whichf)&&(whichf!=NULL)) { if ((whichf->labref!=0)&&(whichf->labrefinstr==whichi)) { BError("B MoveBefore: Cannot move branch between fragments"); } } /* first unhook 'which' from its current position */ if (whichf!=NULL) { if (whichi->previnstr!=NULL) { work=whichi->previnstr; work->nextinstr=whichi->nextinstr; } else { whichf->firstinstr=whichi->nextinstr; } if (whichi->nextinstr!=NULL) { work=whichi->nextinstr; work->previnstr=whichi->previnstr; } else { whichf->lastinstr=whichi->previnstr; } if ((whichf==(struct fragfmt *)PI->curfrag)&&(PI->fragsize>0)) PI->fragsize=PI->fragsize-1; } /* now insert 'which' before 'where' - handle the case where */ /* the destination fragment is empty */ if (wheref->firstinstr==NULL) { wheref->firstinstr=whichi; wheref->lastinstr=whichi; whichi->previnstr=NULL; whichi->nextinstr=NULL; } else if (wherei==NULL) { work=wheref->lastinstr; work->nextinstr=whichi; whichi->previnstr=work; whichi->nextinstr=NULL; wheref->lastinstr=whichi; } else { whichi->previnstr=wherei->previnstr; if (wherei->previnstr!=NULL) { work=(struct instrfmt*)(wherei->previnstr); work->nextinstr=whichi; } else { wheref->firstinstr=whichi; } wherei->previnstr=whichi; whichi->nextinstr=wherei; }; if (wheref==(struct fragfmt *)PI->curfrag) PI->fragsize=PI->fragsize+1; } /*B MoveBefore*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void InitFrag(struct fragfmt *frag) { /****************************************************************************/ /** Called once for each fragment. Sets the 'uses' field as reqd. and the **/ /** 'final' field to the address of the first fragment (starting from this **/ /** one) containing a real machine instruction. It also fills in the **/ /** 'final' field of any intervening empty fragments it finds to save time **/ /** when it is called to initialise them. 
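For example, if fragments F1 and F2 hold no machine instructions and F3 holds the next real instruction, all three end up with their final field pointing at F3, so the final-setting loop is skipped when this routine is later called for F2 (the names F1 to F3 are purely illustrative).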
**/ /****************************************************************************/ struct fragfmt *finalfrag,*nextfrag; struct instrfmt *instr,*nextinstr; int nexti,LastLineNum,LastInstr,i,props; /* if fragment entry label is fixed up to then avoid deletion */ frag->uses=0; frag->props&=(~3); /* clear out falls bits */ if (frag->labindex!=0) { /* fragment has entry label - check if it is addressed */ if (ARCHIsAddrLabel(frag->labindex)!=0) frag->uses=1; } /* user assembly fragments have their use count increased so that */ /* they are not deleted */ if ((frag->props&(GLOBALLABFRAG|USERASSEMFRAG))!=0) frag->uses++; /* if the fragment becomes split between a delay slot owner and */ /* the delay slot instruction then the delay slot instruction is */ /* moved back into the predecessor fragment */ LastInstr=(int)BMachineInstr(frag->lastinstr,PREVCHAIN); if (LastInstr!=0) { instr=(struct instrfmt*)(LastInstr); if ((instr->props&DELAYSLOTEXISTS)!=0) { /* pull next real instruction back into this frag as delay slot */ nextfrag=frag->nextfrag; i=(int)BMachineInstr(nextfrag->firstinstr,NEXTCHAIN); while (i==0) { nextfrag=nextfrag->nextfrag; i=(int)BMachineInstr(nextfrag->firstinstr,NEXTCHAIN); } nextinstr=(struct instrfmt*)(i); BMoveAfter(nextfrag,frag,nextinstr,instr); nextinstr->props|=DELAYSLOTINSTR; } } /* set the final fragment pointers propagating GLOBAL LAB FRAG*/ if (frag->final==0) { props=frag->props; finalfrag=frag; i=(int)BMachineInstr(finalfrag->firstinstr,NEXTCHAIN); while ((i==0)&&(finalfrag->nextfrag!=0)&&(finalfrag->entryid==0)&&(finalfrag->switchtab==0)) { finalfrag=finalfrag->nextfrag; props|=finalfrag->props; i=(int)BMachineInstr(finalfrag->firstinstr,NEXTCHAIN); } frag->final=finalfrag; while ((int)frag!=(int)finalfrag) { frag=frag->nextfrag; frag->props|=props&GLOBALLABFRAG; frag->final=finalfrag; } } /* delete any redundant line numbers - within each run of pseudo */ /* instructions only the last LINE NUMBER entry is kept */ nexti=(int)frag->firstinstr; while (nexti!=0) { instr=(struct instrfmt*)(nexti); if (instr->group>=MINPSEUDOGROUP) { LastLineNum=0; do { if (instr->group==LINENUMBER) LastLineNum=(int)instr; if (instr->nextinstr==0) break ; instr=(struct instrfmt*)(instr->nextinstr); } while (!(instr->group<MINPSEUDOGROUP)) ; i=nexti; while ((i!=0)&&(i!=(int)instr)) { nextinstr=(struct instrfmt*)(i); nexti=(int)nextinstr->nextinstr; if ((nextinstr->group==LINENUMBER)&&(i!=LastLineNum)) { BDeleteInstr(nextinstr,1); } i=nexti; } } nexti=(int)instr->nextinstr; } /* ensure each machine instr which is a delay slot owner is immediately */ /* followed by an instruction which has the DELAY SLOT INSTR property set */ nexti=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nexti!=0) { instr=(struct instrfmt*)(nexti); nexti=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); if (((instr->props&DELAYSLOTEXISTS)!=0)&&(nexti!=0)) { nextinstr=(struct instrfmt*)(nexti); nextinstr->props|=DELAYSLOTINSTR; } } } /*InitFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void CountFrag(struct fragfmt *frag) { /********************************************************************************/ /** Called once for each fragment - following the destination of any fragment **/ /** terminator and updating the fragment usage counts as appropriate. **/ /** **/ /** If the fragment is an external entry point (either the main entry or a **/ /** FORTRAN side entry) then increment the frag->final use count, otherwise **/ /** it's liable to get deleted later.
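With a use count of zero its instructions would otherwise be stripped by reorganisation 1 in ReorgFrag below. As an illustration of the counting itself, a fragment ending in a conditional branch normally contributes one use to the final fragment of its branch destination and one, via the fall through check, to the final fragment of the textually following fragment.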
**/ /** **/ /** If this frag contains real machine instructions then increment the use **/ /** count of the frag to which this one branches (if any) and the frag **/ /** following this one (if control can fall through into it). **/ /********************************************************************************/ struct fragfmt *destfrag,*nextfrag,*edgefrag; struct lrecfmt *lrec; struct instrfmt *instr; int foundinstr; /* Call the architecture specific module to handle any label references */ /* other than those in fragment terminating branches. If these */ /* references are not accounted for then the labels might appear dead */ /* and be deleted by the fragment reorganiser. Any label references */ /* from a switch table have to be handled by this routine. */ ARCHSetLabelRefs(frag); /* entry fragments and outer scope IMP fragments have thier count */ /* increased so that they are not deleted */ if ((PI->proclevel==0)||(frag->entryid!=0)) { edgefrag=(struct fragfmt*)(frag->final); edgefrag->uses=edgefrag->uses+1; } /* see if the fragment terminator instruction references some */ /* other fragment - if so increment its use count */ foundinstr=0; if ((frag->labref!=0)&&(frag->labrefinstr!=NULL)) { lrec=BLabRecAd(frag->labref); if (lrec->labnum>=PI->labadjust) { /* dest is in current proc */ if (lrec->frag==NULL) BError("Label undefined"); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); destfrag->uses=destfrag->uses+1; } instr=frag->labrefinstr; foundinstr=1; } else if (BMachineInstr(frag->lastinstr,PREVCHAIN)!=NULL) { instr=BMachineInstr(frag->lastinstr,PREVCHAIN); if (((instr->props&DELAYSLOTINSTR)!=0)&&(BMachineInstr(instr->previnstr,PREVCHAIN)!=NULL)) { instr=BMachineInstr(instr->previnstr,PREVCHAIN); } foundinstr=1; }; /* determine if control flow from the fragment may fall through */ /* into the succeeding fragment - we tie a dense switch jump */ /* instruction to any following dense switch table fragment */ if ((foundinstr!=0)&&(((instr->props&BRANCH)==0)||((instr->props&CONDITIONALBRANCH)!=0))&&(((instr->props &JUMPINSTR)==0)||(((instr->privprops&DSWITCHJUMPINSTR)!=0)&&(frag->switchtab==0)))&&(frag->nextfrag !=0)&&((frag->props&RETURNFRAG)==0)) { frag->props|=FALLSTHROUGH; nextfrag=frag->nextfrag; nextfrag=(struct fragfmt*)(nextfrag->final); nextfrag->uses=nextfrag->uses+1; nextfrag->props|=FALLENINTO; } /* if we have an empty entry fragment then we mark it as falling through */ if ((foundinstr==0)&&(frag->nextfrag!=0)&&(frag->entryid!=0)) { frag->props|=FALLSTHROUGH; nextfrag=frag->nextfrag; nextfrag=(struct fragfmt*)(nextfrag->final); nextfrag->uses=nextfrag->uses+1; nextfrag->props|=FALLENINTO; } } /*CountFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void InitLiveRegAnalysis(struct fragfmt *frag) { /********************************************************************************/ /** Initialises live register analysis for fragments. We attempt to determine **/ /** which registers are live on both entry to and exit from each fragment in **/ /** the procedure. To do this we are required to calculate which registers are **/ /** referenced and defined in each fragment. 
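The iteration in DoLiveRegAnalysis below then applies the usual liveness equations per register class: out(F) is the union of in(S) over the successors of F (the fall through fragment and any branch destination) and in(F) = use(F) | (out(F) & ~def(F)); for instance with use=0x3, def=0x4 and out=0x6 the new in set is 0x3|(0x6&~0x4) = 0x3.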
**/ /********************************************************************************/ /* we initially clear the in set for each fragment */ frag->rin=0; #if(ISERFREQUIRED!=0) frag->fin=0; #if (ISERFREQUIRED==2) frag->din=0; #endif #if(ISCRFREQUIRED!=0) frag->cin=0; #endif; #endif; /* set the use and definition fields for the fragment */ SetupUseAndDef(frag); } /*InitLiveRegAnalysis*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void DoLiveRegAnalysis(struct fragfmt *frag) { /********************************************************************************/ /** Performs live register analysis on the given fragment in the current **/ /** procedure. The liveness analysis is done using an iterative scheme. This **/ /** routine is called repeatedly for each fragment until the in sets for **/ /** each fragment are constant for an entire pass of the procedure. The **/ /** variable changes is updated if the in set changes for any fragment. We are **/ /** assured that the analysis will eventually terminate since the in sets **/ /** always increase in cardinality. **/ /********************************************************************************/ struct fragfmt *curfrag; struct instrfmt *instr; struct lrecfmt *lrec; int oldrin,oldfin,oldcin,outrset,outfset,outcset,outdset,foundref,nextinstr; int curfragaddr,delta; #if(ISERFREQUIRED==2) int olddin; #endif /* remember the old in set so that we can determine if it has changed */ oldrin=frag->rin; #if(ISERFREQUIRED!=0) oldfin=frag->fin; #if(ISERFREQUIRED==2) olddin=frag->din; #endif #if(ISCRFREQUIRED!=0) oldcin=frag->cin; #endif; #endif; if ((frag->props&USERASSEMFRAG)!=0) { /* if the fragment contains user assembler then we consider */ /* that all registers are live on exit */ frag->rout=-1; #if(ISERFREQUIRED!=0) frag->fout=-1; #if(ISERFREQUIRED==2) frag->dout=-1; #endif #if(ISCRFREQUIRED!=0) frag->cout=-1; #endif; #endif; /* if we are in a user assembly fragment then all registers */ /* are considered to be live on entry - since we have no */ /* better information */ frag->rin=-1; #if(ISERFREQUIRED!=0) frag->fin=-1; #if(ISERFREQUIRED==2) frag->din=-1; #endif #if(ISCRFREQUIRED!=0) frag->cin=-1; #endif; #endif; } else if ((frag->props&DSWITCHJUMPFRAG)!=0) { /* If we have a dense switch jump fragment then we try and locate */ /* all the case fragments that may be jumped to. If we don't find */ /* any then we assume that all registers are live on fragment */ /* exit otherwise we assume that the destination list is complete. 
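In other words out(jump) becomes the union of in(case) over every case fragment whose DSWITCHNOTE points back at this fragment, and when no such note can be found every register (all bits set) is treated as live on exit.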
*/ foundref=0; outrset=0; outfset=0; outcset=0; outdset=-1; curfragaddr=(int)PI->procfrag; while (curfragaddr!=0) { curfrag=(struct fragfmt*)(curfragaddr); curfragaddr=(int)curfrag->nextfrag; if ((curfrag->props&DSWITCHCASEFRAG)!=0) { nextinstr=(int)curfrag->firstinstr; while (nextinstr!=0) { instr=(struct instrfmt*)(nextinstr); if ((instr->group==DSWITCHNOTE)&&(instr->u1.immval==(int)frag)) { foundref=1; curfrag=(struct fragfmt*)(curfrag->final); outrset|=curfrag->rin; #if(ISERFREQUIRED!=0) outfset|=curfrag->fin; #if(ISERFREQUIRED==2) outdset|=curfrag->din; #endif #if(ISCRFREQUIRED!=0) outcset|=curfrag->cin; #endif; #endif; } nextinstr=(int)instr->nextinstr; } } } if (foundref==0) { outrset=-1; outfset=-1; outcset=-1; outdset=-1; } frag->rout=outrset; #if(ISERFREQUIRED!=0) frag->fout=outfset; #if(ISERFREQUIRED==2) frag->dout=outdset; #endif #if(ISCRFREQUIRED!=0) frag->cout=outcset; #endif; #endif; /* the in set is considered to be the union of all registers */ /* referenced in the fragment and all registers live on output that */ /* are not actually generated inside the fragment */ frag->rin=frag->ruse|(frag->rout&(~frag->rdef)); #if(ISERFREQUIRED!=0) frag->fin=frag->fuse|(frag->fout&(~frag->fdef)); #if(ISERFREQUIRED==2) frag->din=frag->duse|(frag->dout&(~frag->ddef)); #endif #if(ISCRFREQUIRED!=0) frag->cin=frag->cuse|(frag->cout&(~frag->cdef)); #endif; #endif; } else { /* if the fragment falls through to the succeeding fragment then add */ /* its in set to the current fragment's out set */ if ((frag->props&FALLSTHROUGH)!=0) { curfrag=frag->nextfrag; curfrag=(struct fragfmt*)(curfrag->final); outrset=curfrag->rin; #if(ISERFREQUIRED!=0) outfset=curfrag->fin; #if(ISERFREQUIRED==2) outdset=curfrag->din; #endif #if(ISCRFREQUIRED!=0) outcset=curfrag->cin; #endif; #endif; } else { outrset=0; outfset=0; outcset=0; outdset=0; } /* add the in set of any fragment being branched to to the current */ /* fragment out set */ if (frag->labref!=0) { lrec=BLabRecAd(frag->labref); curfrag=lrec->frag; curfrag=(struct fragfmt*)(curfrag->final); outrset|=curfrag->rin; #if(ISERFREQUIRED!=0) outfset|=curfrag->fin; #if(ISERFREQUIRED==2) outdset|=curfrag->din; #endif #if(ISCRFREQUIRED!=0) outcset|=curfrag->cin; #endif; #endif; } /* update the out set in the fragment record */ frag->rout=outrset; #if(ISERFREQUIRED!=0) frag->fout=outfset; #if(ISERFREQUIRED==2) frag->dout=outdset; #endif #if(ISCRFREQUIRED!=0) frag->cout=outcset; #endif; #endif; /* the in set is considered to be the union of all registers */ /* referenced in the fragment and all registers live on output that */ /* are not actually generated inside the fragment */ frag->rin=frag->ruse|(frag->rout&(~frag->rdef)); #if(ISERFREQUIRED!=0) frag->fin=frag->fuse|(frag->fout&(~frag->fdef)); #if(ISERFREQUIRED==2) frag->din=frag->duse|(frag->dout&(~frag->ddef)); #endif #if(ISCRFREQUIRED!=0) frag->cin=frag->cuse|(frag->cout&(~frag->cdef)); #endif; #endif; }; /* update changes if the in set was changed */ delta=0; if (oldrin!=frag->rin) delta++; #if(ISERFREQUIRED!=0) if (oldfin!=frag->fin) delta++; #if(ISCRFREQUIRED!=0) if (oldcin!=frag->cin) delta++; #endif #if(ISERFREQUIRED==2) if (olddin!=frag->din) delta++; #endif #endif if (delta!=0) changes++; } /*DoLiveRegAnalysis*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd.
All Rights Reserved.*/ /**/ /***/ static void ReorgFrag(struct fragfmt *frag) { /*********************************************************************************/ /** ReorgFrag tests for certain conditions on this frag and the frags it **/ /** references. When these conditions are met it rearranges the fragment links **/ /** and other information in such a way that 'better' code will be generated. **/ /*********************************************************************************/ struct lrecfmt *lrec; struct instrfmt *instr,*destinstr,*nextinstr,*tinstr,*slotinstr; struct fragfmt *destfrag,*nextfrag,*nextnextfrag,*destdestfrag; struct fragfmt *workf; int i,nexti,prevf,nextf,condmatch,F1,F2,F3,F4,F5; int conditional,uses,following,coalesce,ahead,jumppredict,oldlabref; int ochanges,forceexit,alloc,numinstrs,junk; /***/ /** detect and delete unused labels */ /** pds can not understand why return labs can not be deleted. Certainly */ /** on PENTIUM we avoid several AGIs if the exit sequence can be merged */ /** with normal instructions by the scheduler */ /***/ if ((frag->labindex!=0)&&(frag->switchtab==0)&&((Target==PENTIUM)||((frag->props&RETURNFRAG)==0))&&((frag ->props&PRIVATELABFRAG)==0)) { workf=(struct fragfmt*)(frag->final); if ((workf->uses<=0)||((workf->uses==1)&&((workf->props&FALLENINTO)!=0))) { changes++; frag->labindex=0; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT: %8x DELETED LABEL\n",(int)frag); } } } /***/ /** delete machine instructions from unused frag and decrement use count of */ /** any referenced frags */ /***/ if ((frag->uses<=0)&&(frag->final==frag)&&((CurInhibVect&64)==0)&&((frag->props&NODELETEFRAG)==0) &&(BMachineInstr(frag->firstinstr,NEXTCHAIN)!=NULL)) { /* if we are deleting a dense switch jump fragment then remove */ /* the dense switch notes of all the destinations and reduce */ /* the destination fragment reference count */ if ((frag->props&DSWITCHJUMPFRAG)!=0) { frag->props&=~DSWITCHJUMPFRAG; nextf=(int)PI->procfrag; while (nextf!=0) { workf=(struct fragfmt*)(nextf); if ((workf->props&DSWITCHCASEFRAG)!=0) { i=(int)workf->firstinstr; while (i!=0) { instr=(struct instrfmt*)(i); if ((instr->group==DSWITCHNOTE)&&(instr->u1.immval==(int)frag)) { workf=(struct fragfmt*)(workf->final); workf->uses=workf->uses-1; BDeleteInstr(instr,1); break ; } i=(int)instr->nextinstr; } } nextf=(int)workf->nextfrag; } ARCHDeleteSwitch(frag); } /* delete the real instructions in the fragment */ oldlabref=frag->labref; i=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (i!=0) { instr=(struct instrfmt*)(i); nexti=(int)instr->nextinstr; BDeleteInstr(instr,1); i=(int)BMachineInstr((struct instrfmt *)nexti,NEXTCHAIN); } changes++; frag->props&=~(RETURNFRAG|RETURNBRANCH|COMMONTAIL); frag->labindex=0; /* this frag is never referenced */ frag->labrefinstr=NULL; /* this frag has no code now */ if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 1\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 1"); } } /* update the reference count of any fragment branched to by the */ /* one currently being deleted */ if ((oldlabref!=0)&&((CurInhibVect&128)==0)) { /* find out where it used to jump to */ lrec=BLabRecAd(oldlabref); if (lrec->labnum>=PI->labadjust) { /* dest is in current proc */ destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); destfrag->uses=destfrag->uses-1; changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 2\n",(int)frag); if (ExtFragReport!=0) { 
BPrintFrags("FRAGMENTS AFTER REORGANISATION 2"); } } } frag->labref=0; } /* if the succeeding frag is fallen into then modify its properties */ if (((frag->props&FALLSTHROUGH)!=0)&&((CurInhibVect&256)==0)) { destfrag=frag->nextfrag; destfrag=(struct fragfmt*)(destfrag->final); destfrag->uses=destfrag->uses-1; destfrag->props&=~FALLENINTO; frag->props&=~FALLSTHROUGH; changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 3\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 3"); } } } return ; /* none of the remaining reorganisations apply */ } /***/ /** detect a loop whose only reference is from the loop end : give the */ /** whole loop use counts of zero so that the next iteration will */ /** delete all its frags */ /***/ do { if (frag->labrefinstr!=NULL) { if ((frag->props&FALLSTHROUGH)!=0) break ; lrec=BLabRecAd(frag->labref); if (lrec->labnumlabadjust) break ; /* dest not in this proc */ destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); if ((destfrag->props&((DSWITCHJUMPFRAG|USERASSEMFRAG)|NOOPTIMFRAG))!=0) break ; uses=destfrag->uses; if (uses!=1) break ; if (FindFrag(frag,NULL,destfrag,PREVCHAIN)==0) break ; if ((int)frag==(int)destfrag) { if ((CurInhibVect&512)!=0) return ; frag->uses=0; if ((frag->props&FALLSTHROUGH)!=0) { workf=frag->nextfrag; workf=(struct fragfmt*)(workf->final); workf->uses=workf->uses-1; workf->props&=~FALLENINTO; frag->props&=~FALLSTHROUGH; } changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 4\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 4"); } } return ; } else { if ((CurInhibVect&1024)!=0) return ; i=0; nextf=(int)destfrag->nextfrag; while (nextf!=(int)frag->nextfrag) { workf=(struct fragfmt*)(nextf); if ((workf->props&FALLENINTO)==0) { i=1; break ; } if (workf->labindex!=0) { i=1; break ; } nextf=(int)workf->nextfrag; } if (i==1) break ; nextf=(int)destfrag; while (nextf!=(int)frag->nextfrag) { workf=(struct fragfmt*)(nextf); workf->uses=0; nextf=(int)workf->nextfrag; } if ((frag->props&FALLSTHROUGH)!=0) { workf=frag->nextfrag; workf=(struct fragfmt*)(workf->final); workf->uses=workf->uses-1; workf->props&=~FALLENINTO; } changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 5\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 5"); } } return ; } } break ; } while (1) /* FOR EVER */; /***/ /** for switch tables check each destination to see whether it is itself */ /** an unconditional branch -- if so, change the table entry to point */ /** directly to destination's destination. Switch implementation is architecture*/ /** specific so this optimisation is performed in the architecture specific */ /** module. */ /***/ if ((frag->switchtab!=0)&&((CurInhibVect&4)==0)) { changes+=ARCHImproveSwitch(frag); } /***/ /** look for fragments which end in unconditional branches and check to see */ /** if they branch to fragments which just do a function return. On some */ /** architectures it is prudent to replace the unconditional branch with */ /** the return code. */ /***/ if (((frag->props&FALLSTHROUGH)==0)&&((CurInhibVect&4)==0)) { changes+=ARCHImproveReturn(frag); } /***/ /** if frag ends with a jump to another jump instruction, replace the first */ /** jump's destination with the second's destination if appropriate. */ /** if the new destination is another jump instruction then try to apply same*/ /** transformation, etc. 
*/ /***/ if (((frag->props&USERASSEMFRAG)==0)&&((frag->props&NOOPTIMFRAG)==0)&&(frag->labref!=0)&&((CurInhibVect &2048)==0)) { ochanges=changes; do { lrec=BLabRecAd(frag->labref); if (lrec->labnum>=PI->labadjust) { /* dest is in current proc */ destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); /* destination frag */ i=(int)BMachineInstr(destfrag->firstinstr,NEXTCHAIN); /***/ if (i!=0) { condmatch=ARCHConditionMatch((int)frag->labrefinstr,i); if ((i!=(int)frag->labrefinstr)&&(destfrag->uses!=0)&&(condmatch==1)) { /* instr at i is a jump whose condition includes the */ /* condition in the jump at the end of this frag */ destinstr=(struct instrfmt*)(i); if (((destinstr->props&DELAYSLOTEXISTS)!=0)&&((destinstr->props&DELAYSLOTUNUSED)==0)) break ; lrec=BLabRecAd(destfrag->labref); destdestfrag=lrec->frag; destdestfrag=(struct fragfmt*)(destdestfrag->final); if ((int)destdestfrag!=(int)destfrag) { destfrag->uses=destfrag->uses-1; /*dec old dst*/ destdestfrag->uses=destdestfrag->uses+1; /*inc new dst*/ instr=frag->labrefinstr; instr->u1.immval=destinstr->u1.immval; frag->labref=destfrag->labref; frag->props|=(COMMONTAIL|RETURNBRANCH)&destfrag->props; changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 6\n",(int)frag); if (ExtFragReport!=0) { /* code folded from here */ BPrintFrags("FRAGMENTS AFTER REORGANISATION 6"); /* unfolding */ } } } else break ; } else break ; } else break ; } else break ; } while (!((changes-ochanges)>SENSIBLEITER)) ; } /***/ /** replace a conditional jump over an unconditional jump by an inverse */ /** conditional jump to the unconditional jump's destination */ /***/ ochanges=changes; do { if ((frag->labrefinstr!=NULL)&&((frag->props&FALLSTHROUGH)!=0)&&((frag->props&USERASSEMFRAG)==0)&&((frag ->props&NOOPTIMFRAG)==0)&&((CurInhibVect&4096)==0)) { nextfrag=frag->nextfrag; nextfrag=(struct fragfmt*)(nextfrag->final); if (nextfrag->uses>1) break ; /* cannot delete next frag */ if (nextfrag->labrefinstr==NULL) break ; /* next frag has no jump */ if (BMachineInstr(nextfrag->firstinstr,NEXTCHAIN)!=nextfrag->labrefinstr) break ; /* jump is not first instr */ if ((nextfrag->props&FALLSTHROUGH)!=0) break ; /* not uncond */ lrec=BLabRecAd(frag->labref); destfrag=lrec->frag; if (nextfrag->nextfrag==0) break ; nextnextfrag=nextfrag->nextfrag; if (nextnextfrag->final!=destfrag->final) { while ((nextnextfrag->final!=destfrag->final)&&(nextnextfrag->uses==0)) { if (BMachineInstr(nextnextfrag->firstinstr,NEXTCHAIN)!=NULL) break ; if (nextnextfrag->nextfrag==0) break ; nextnextfrag=nextnextfrag->nextfrag; } if (nextnextfrag->final!=destfrag->final) break ; } instr=frag->labrefinstr; nextinstr=nextfrag->labrefinstr; if (((nextinstr->props&DELAYSLOTEXISTS)!=0)&&((nextinstr->props&DELAYSLOTUNUSED)==0)) break ; /* uncond jump has delay slot -- cannot delete */ lrec=BLabRecAd(nextinstr->u1.immval); jumppredict=FindFrag(frag,NULL,lrec->frag,PREVCHAIN); /* = 0 => label is forward => will not jump */ /* = 1 => label in prior fragment => will jump */ junk=ARCHInvertBranch(instr,jumppredict,0); instr->u1.immval=nextinstr->u1.immval; /*change jump destination*/ frag->labref=nextfrag->labref; frag->props&=~(COMMONTAIL|RETURNBRANCH); frag->props|=(COMMONTAIL|RETURNBRANCH)&nextfrag->props; destfrag=(struct fragfmt*)(destfrag->final); destfrag->props|=FALLENINTO; BDeleteInstr(nextinstr,1); prevf=(int)nextfrag->prevfrag; nextf=(int)nextfrag->nextfrag; alloc=nextfrag->allocchain; memset(nextfrag,0,sizeof( struct fragfmt)); nextfrag->allocchain=alloc; 
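/* the emptied fragment keeps only its allocation chain and its prev/next links; its final pointer, and the final pointers of any preceding empty fragments that referred to it, are redirected below to the next real fragment */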
nextfrag->prevfrag=(struct fragfmt *)prevf; nextfrag->nextfrag=(struct fragfmt *)nextf; nextnextfrag=(struct fragfmt*)(nextf); nextfrag->final=nextnextfrag->final; while (prevf!=0) { workf=(struct fragfmt*)(prevf); if (workf->final==nextfrag) { workf->final=nextfrag->final; prevf=(int)workf->prevfrag; } else { break ; } } /* ensure that there is a fall through path to the dest frag */ workf=(struct fragfmt*)(nextfrag->final); while (workf->final!=destfrag) { workf=(struct fragfmt*)(workf->final); if ((workf->props&FALLENINTO)==0) { workf->uses=workf->uses+1; } workf->props|=FALLENINTO|FALLSTHROUGH; workf=(struct fragfmt*)(workf->nextfrag); } changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 7\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 7"); } } } else break ; } while (!((changes-ochanges)>SENSIBLEITER)) ; /***/ /** if the destination of a conditional or unconditional jump is the following*/ /** fragment then the jump can be deleted */ /***/ /** if the only use of the destination fragment is */ /** (a) the target of an unconditional jump (including the one possibly*/ /** just deleted) */ /** or (b) the target of the conditional jump just deleted from the */ /** preceding fragment */ /** then coalesce the calling and target instruction chains */ ochanges=changes; do /* apply this transformation repeatedly */{ if ((frag->props&(((NOOPTIMFRAG|USERASSEMFRAG)|NOCOALESCEHEAD)|DSWITCHJUMPFRAG))!=0) break ; if (frag->labrefinstr!=NULL) { instr=frag->labrefinstr; lrec=BLabRecAd(frag->labref); if (lrec->labnum<PI->labadjust) break ; /* dest not in this proc */ destfrag=lrec->frag; following=0; if (frag->nextfrag!=0) { nextfrag=frag->nextfrag; while ((nextfrag->nextfrag!=0)&&(nextfrag->uses==0)&& (BMachineInstr(nextfrag->firstinstr,NEXTCHAIN)==NULL)) { if ((nextfrag->props&USERASSEMFRAG)!=0) break; nextfrag=nextfrag->nextfrag; } if (destfrag->final==nextfrag->final) following=1; } destfrag=(struct fragfmt*)(destfrag->final); if ((int)destfrag==(int)frag) break ; /* unlikely */ if ((destfrag->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0) break ; uses=destfrag->uses; conditional=frag->props&FALLSTHROUGH; if (((conditional==0)&&(uses==1))||((conditional!=0)&&(uses==2)&&(following==1))) coalesce=1; else coalesce=0; /* further checks on coalesce-ability */ if ((destfrag->props&NOCOALESCETAIL)!=0) coalesce=0; if (destfrag->switchtab!=0) { coalesce=0; following=0; } if (coalesce==1) { F1=(int)destfrag->prevfrag; F2=(int)destfrag->nextfrag; F3=(int)destfrag; do { workf=(struct fragfmt*)(F3); if (F3!=(int)workf->final) { F3=(int)workf->final; workf=(struct fragfmt*)(F3); } if ((workf->props&(NOCOALESCEHEAD|NOCOALESCETAIL))!=0) coalesce=0; if ((workf->props&FALLSTHROUGH)==0) break ; F3=(int)workf->nextfrag; } while (!(F3==0)) ; if (F3!=0) F4=(int)workf->nextfrag; else F4=0; if (F4!=0) { workf=(struct fragfmt*)(F4); if ((workf->props&DSWITCHJUMPFRAG)!=0) { /* don't leave a switch jump behind */ F3=F4; F4=(int)workf->nextfrag; } } F5=(int)frag->nextfrag; ahead=FindFrag(frag,NULL,destfrag,NEXTCHAIN); if (ahead==0) { /* destination behind current frag - must be careful */ /* to avoid closed fragment loops when coalescing */ if (FindFrag(destfrag,(struct fragfmt *)F3,frag,NEXTCHAIN)==1) coalesce=0; } /* make sure the instruction limit is not exceeded */ numinstrs=0; nexti=(int)frag->firstinstr; while (nexti!=0) { numinstrs++; tinstr=(struct instrfmt*)(nexti); nexti=(int)tinstr->nextinstr; } nexti=(int)destfrag->firstinstr; while (nexti!=0) { numinstrs++;
tinstr=(struct instrfmt*)(nexti); nexti=(int)tinstr->nextinstr; } if (numinstrs>MAXFRAGMENTSIZE) coalesce=0; } if (((following==1)||(coalesce==1))&&((CurInhibVect&8192)==0)) { /* jump is to following fragment or is only ref to destination */ if ((instr->props&DELAYSLOTEXISTS)!=0) { if (((instr->props&CONDITIONALBRANCH)!=0)&&((instr->props&DELAYSLOTUNUSED)==0)&&((instr->props&ANNULLED )!=0)) { break ; } nextinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); if ((instr->props&DELAYSLOTUNUSED)!=0) { BDeleteInstr(nextinstr,1); } else if (((instr->props&CONDITIONALBRANCH)==0)&&((instr->props&BRANCH)!=0)&&((instr->props&ANNULLED )!=0)&&((instr->props&ANNULLEDTAKEN)!=0)) { BDeleteInstr(nextinstr,1); } else { nextinstr->props&=~DELAYSLOTINSTR; }; } BDeleteInstr(frag->labrefinstr,1) /* delete the jump */; frag->labref=0; frag->labrefinstr=NULL; frag->props&=~(COMMONTAIL|RETURNBRANCH); if (!(coalesce==1)) frag->props|=FALLSTHROUGH; /* ensure intervening frags between the old jump fragment and */ /* the destination have a fall through path */ if ((following==1)&&(coalesce==0)) { workf=frag->nextfrag; while (workf->final!=destfrag) { workf=(struct fragfmt*)(workf->final); if ((workf->props&FALLENINTO)==0) { workf->uses=workf->uses+1; } workf->props|=FALLENINTO|FALLSTHROUGH; workf=(struct fragfmt*)(workf->nextfrag); } } if (following==1) destfrag->props|=FALLENINTO; changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 8\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 8"); } } } } else if (((frag->props&FALLSTHROUGH)!=0)&&(frag->nextfrag!=0)) { /* coalesce two fragments if one falls into the other and there */ /* is no other entry into the second fragment */ coalesce=0; nextfrag=frag->nextfrag; destfrag=(struct fragfmt*)(nextfrag->final); if ((destfrag->labindex==0)&&(destfrag->uses==1)&&((frag->props&((USERASSEMFRAG|NOOPTIMFRAG)|DSWITCHJUMPFRAG ))==0)&&(destfrag->switchtab==0)&&((destfrag->props&((USERASSEMFRAG|NOOPTIMFRAG)|NOCOALESCETAIL))==0) ) { /* check that all the labels associated with the fragment */ /* have been deleted */ coalesce=1; following=1; F1=(int)destfrag->prevfrag; F2=(int)destfrag->nextfrag; F3=0; nextf=(int)frag->nextfrag; while (nextf!=(int)destfrag) { workf=(struct fragfmt*)(nextf); if (workf->labindex!=0) coalesce=0; nextf=(int)workf->nextfrag; } /* make sure the instruction limit is not exceeded */ numinstrs=0; nexti=(int)frag->firstinstr; while (nexti!=0) { numinstrs++; tinstr=(struct instrfmt*)(nexti); nexti=(int)tinstr->nextinstr; } nexti=(int)destfrag->firstinstr; while (nexti!=0) { numinstrs++; tinstr=(struct instrfmt*)(nexti); nexti=(int)tinstr->nextinstr; } if (numinstrs>MAXFRAGMENTSIZE) coalesce=0; } } else { break ; }; if ((coalesce==1)&&((CurInhibVect&16384)==0)) { /* Append the instruction sequence in 'destfrag' to the end of */ /* 'frag's sequence (the jump at the end of 'frag' having been */ /* deleted above) */ /* 'destfrag' now becomes empty, but any subsequent frags, up to*/ /* one ending in an unconditional jump, must be moved to after */ /* 'frag' in the fragment chain. */ if (frag->lastinstr!=0) /* Might have deleted only */{ instr=(struct instrfmt*)(frag->lastinstr); /* instr in frag above. 
*/ instr->nextinstr=(struct instrfmt *)destfrag->firstinstr; if (destfrag->firstinstr!=0) { destinstr=(struct instrfmt*)(destfrag->firstinstr); destinstr->previnstr=instr; } } else { frag->firstinstr=destfrag->firstinstr; } /* update frag's info */ frag->props=(frag->props&(~FALLSTHROUGH)) | (destfrag->props&(FALLSTHROUGH|RETURNFRAG|RETURNBRANCH|COMMONTAIL|NOCOALESCEHEAD|NOCOALESCETAIL)); frag->labref=destfrag->labref; frag->labrefinstr=destfrag->labrefinstr; if (destfrag->lastinstr!=0) { frag->lastinstr=destfrag->lastinstr; } /* now unhook appropriate frags after 'destfrag' and chain them */ /* after 'frag' */ alloc=destfrag->allocchain; memset(destfrag,0,sizeof( struct fragfmt)); /* this frag now dead */ destfrag->prevfrag=(struct fragfmt *)F1; /* but preserve its links */ destfrag->allocchain=alloc; nextfrag=frag->nextfrag; if ((nextfrag->final==destfrag->final)||(F2==0)||(F3==0)||(F3==(int)destfrag)) { /* no subsequent frags to be chained onto 'frag' */ destfrag->nextfrag=(struct fragfmt *)F2; } else { destfrag->nextfrag=(struct fragfmt *)F4; if (F4!=0) { workf=(struct fragfmt*)(F4); workf->prevfrag=destfrag; } frag->nextfrag=(struct fragfmt *)F2; workf=(struct fragfmt*)(F2); workf->prevfrag=frag; workf=(struct fragfmt*)(F3); workf->nextfrag=(struct fragfmt *)F5; if (F5!=0) { workf=(struct fragfmt*)(F5); workf->prevfrag=(struct fragfmt *)F3; } } /* having obliterated destfrag find the next 'real' frag and */ /* put its address in destfrag's _final field and the _final */ /* field of previous frags which used to refer to destfrag */ nextf=(int)destfrag->nextfrag; if (nextf!=0) { nextfrag=(struct fragfmt*)(nextf); nextf=(int)nextfrag->final; } else { nextf=(int)destfrag; } destfrag->final=(struct fragfmt *)nextf; prevf=(int)destfrag->prevfrag; while (prevf!=0) { workf=(struct fragfmt*)(prevf); if ((workf->final==0)||(workf->final==destfrag)) { workf->final=(struct fragfmt *)nextf; } else { break ; } prevf=(int)workf->prevfrag; } /* update the falls through and fallen into properties */ if ((frag->props&FALLSTHROUGH)==0) { nextf=(int)frag->nextfrag; while (nextf!=0) { workf=(struct fragfmt*)(nextf); forceexit=0; while (workf!=workf->final) { if (((workf->props&DSWITCHCASEFRAG)!=0)||(workf->entryid!=0)||(workf->labindex!=0)) { forceexit=1; } workf=(struct fragfmt*)(workf->nextfrag); } workf->props&=~FALLENINTO; if ((workf->labindex!=0)||(forceexit!=0)||(workf->entryid!=0)|| (BMachineInstr(workf->firstinstr,NEXTCHAIN)!=NULL)) { break ; } workf->props&=~FALLSTHROUGH; nextf=(int)workf->nextfrag; } } changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 9\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 9"); } } } else break ; } while (!((changes-ochanges)>SENSIBLEITER)) ; /**/ /** If this frag ends in an unconditional jump to a target frag which */ /** itself does not fall through, copy up the instructions in target frag */ /** and delete the jump provided the number of instructions is small. */ /** Small is architecture dependent and should be defined in the archdefs file*/ /***/ if (MINCODESIZE!=0) return; ochanges=changes; do /* apply this transformation repeatedly */{ if ((frag->props&(((NOOPTIMFRAG|USERASSEMFRAG)|NOCOALESCEHEAD)|DSWITCHJUMPFRAG))!=0) break ; if (frag->labrefinstr==NULL) break ; instr=frag->labrefinstr; if (!((instr->props&(BRANCH|CONDITIONALBRANCH))==BRANCH)) break ; lrec=BLabRecAd(frag->labref); if (lrec->labnum<PI->labadjust) break ; /* dest not in this proc */ destfrag=lrec->frag; destfrag=(struct
fragfmt*)(destfrag->final); if ((destfrag->props& (NOOPTIMFRAG|USERASSEMFRAG|FALLSTHROUGH|NOCOALESCETAIL|DSWITCHJUMPFRAG))!=0) break ; if (destfrag==frag) break; /* block jumps to itself!! */ i=0; nexti=(int)destfrag->firstinstr; do { nexti=(int)BMachineInstr((struct instrfmt *)nexti,NEXTCHAIN); if (nexti==0) break ; i++; nextinstr=(struct instrfmt*)(nexti); nexti=(int)nextinstr->nextinstr; } while (1) /* FOR EVER */; if (i>8) break ; /* 3 beats at 3 instrs per cycle */ /**/ /* do we need any further checks here before proceeding ? */ /**/ /**/ /* sort out slot instruction */ /**/ slotinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); if (slotinstr!=NULL) { if ((instr->props&DELAYSLOTUNUSED)!=0) { BDeleteInstr(slotinstr,0); } else { slotinstr->props&=~DELAYSLOTINSTR; } } BDeleteInstr(instr,0) /* Zap the jump */; frag->labrefinstr=NULL; /**/ /* copy up the instructions and rechain */ /**/ nexti=(int)destfrag->firstinstr; do { nexti=(int)BMachineInstr((struct instrfmt *)nexti,NEXTCHAIN); if (nexti==0) break ; nextinstr=(struct instrfmt*)(nexti); tinstr=BInsertInstr(frag,0); BCopyInstr(nextinstr,tinstr); nexti=(int)nextinstr->nextinstr; if (destfrag->labrefinstr==nextinstr) frag->labrefinstr=tinstr; } while (1) /* FOR EVER */; /**/ /* update frag fields */ /**/ destfrag->uses=destfrag->uses-1; frag->labref=destfrag->labref; frag->props|=destfrag->props&(~(FALLENINTO|FALLSTHROUGH)); /* find the new destination and, if in this proc, updtae the count */ if (frag->labrefinstr!=NULL) { instr=frag->labrefinstr; lrec=BLabRecAd(frag->labref); if (lrec->labnum>=PI->labadjust) { destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); destfrag->uses++; } } changes++; if ((TraceEnab!=0)&&(FragReport!=0)) { printf("FRAGMENT:%8x REORGANISATION 10\n",(int)frag); if (ExtFragReport!=0) { BPrintFrags("FRAGMENTS AFTER REORGANISATION 10"); } } } while (!((changes-ochanges)>SENSIBLEITER)) ; } /*ReorgFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void AddPredFrag(int procfrag,int isforward,int isswitch,struct fragfmt *frag,struct fragfmt *pred) { /*********************************************************************************/ /** Adds a predecessor descriptor to the predecessor list for the given **/ /** fragment. Frag is the fragment and pred is its predecessor. The pred list **/ /** does not contain duplicates. If procfrag is zero then the direction of the **/ /** edge is known and is given in isforward. If procfrag is non-zero then the **/ /** direction is unknown and the parameter gives the address of the first frag **/ /** in the procedure. A forward edge is a physically forward jump - thus all **/ /** loops must contain at least one back edge. If isswitch is non-zero then **/ /** the edge is generated from a dense switch jump instruction. 
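For example, if fragment F3 ends with a branch back to an earlier fragment F1, the descriptor added to F1's list records F3 with isforward set to 0, marking the back edge of the loop, whereas a branch from F1 forward to F3 would be recorded in F3's list with isforward set to 1 (F1 and F3 are illustrative names).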
**/ /*********************************************************************************/ struct fragfmt *curfrag; struct preddescfmt *desc; int nextdesc,nextfrag,ad,i,free,alloc; /* if the control inflow is for a label which should be ignored by */ /* the backend optimiser then don't generate a predecessor descriptor */ if ((frag->props&PRIVATELABFRAG)!=0) return ; /* check that the predecessor is not already in the list */ nextdesc=frag->preds; while (nextdesc!=0) { desc=(struct preddescfmt*)(nextdesc); if (desc->frag==(int)pred) return ; nextdesc=desc->next; } /* if procfrag is non-zero then we have to determine if the */ /* predecessor represents a forward edge or not */ if (procfrag!=0) { isforward=0; nextfrag=procfrag; while (nextfrag!=(int)frag) { if (nextfrag==(int)pred) { isforward=1; break ; } curfrag=(struct fragfmt*)(nextfrag); nextfrag=(int)curfrag->nextfrag; } } /* if the free predecessor descriptor list is exhausted then */ /* we malloc another chunk of space for it */ if (freepred [PI->proclevel]==0) { ad=BMalloc(PREDDESCALLOCSIZE*PREDDESCFMTSIZE,1,MAPREDDESCS); if (ad==0) BError("No space for predecessor descriptors"); free=freepred [PI->proclevel]; alloc=predalloc [PI->proclevel]; for (i=1; i<=PREDDESCALLOCSIZE; i++) { desc=(struct preddescfmt*)(ad); desc->frag=FREEPREDMARKER; desc->next=free; desc->allocchain=alloc; free=ad; alloc=ad; ad+=PREDDESCFMTSIZE; } freepred [PI->proclevel]=free; predalloc [PI->proclevel]=alloc; } /* create the predecessor descriptor */ desc=(struct preddescfmt*)(freepred [PI->proclevel]); freepred [PI->proclevel]=desc->next; if (desc->frag!=FREEPREDMARKER) { BError("AddPredFrag: Bad predecessor descriptor chain"); } desc->next=frag->preds; desc->frag=(int)pred; desc->isforward=isforward; desc->isswitchedge=isswitch; frag->preds=(int)desc; } /*AddPredFrag*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void GenPredecessors(struct fragfmt *frag) { /*********************************************************************************/ /** Adds this fragment to the predecessor list of any fragment that it branches **/ /** to. If it is a dense switch jump fragment then there may be many such **/ /** successor fragments. This routine is called once for each final fragment in **/ /** the procedure to generate a complete predecessor list for each fragment. 
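For example, a dense switch jump fragment whose DSWITCHNOTE entries identify case fragments C1 to Cn is added to the predecessor list of each of them, and when no notes can be found it is conservatively recorded as a predecessor of every dense switch case fragment in the procedure.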
**/ /*********************************************************************************/ struct fragfmt *curfrag,*finalfrag; struct instrfmt *instr; struct lrecfmt *lrec; int foundcase,nextfrag,nextinstr,isforward; /* generate a predecessor entry for a fragment terminating branch */ if (frag->labref!=0) { lrec=BLabRecAd(frag->labref); if (lrec->labnum>=PI->labadjust) { curfrag=lrec->frag; curfrag=(struct fragfmt*)(curfrag->final); AddPredFrag((int)PI->procfrag,0,0,curfrag,frag); } } /* if we have a dense switch jump fragment then it may be the */ /* predecessor of many dense switch case fragments */ if ((frag->props&DSWITCHJUMPFRAG)!=0) { /* search for dense switch case fragments with a switch note */ /* pointing to the original fragment and add that fragment to */ /* their predecessor lists */ foundcase=0; isforward=0; nextfrag=(int)PI->procfrag; while (nextfrag!=0) { curfrag=(struct fragfmt*)(nextfrag); if ((curfrag->props&DSWITCHCASEFRAG)!=0) { nextinstr=(int)curfrag->firstinstr; while (nextinstr!=0) { instr=(struct instrfmt*)(nextinstr); if ((instr->group==DSWITCHNOTE)&&(instr->u1.immval==(int)frag)) { foundcase=1; finalfrag=(struct fragfmt*)(curfrag->final); AddPredFrag(0,isforward,1,finalfrag,frag); } nextinstr=(int)instr->nextinstr; } } if (nextfrag==(int)frag) isforward=1; nextfrag=(int)curfrag->nextfrag; } /* if no switch notes were found then it is assummed that */ /* the fragment may be the predecessor of any dense switch */ /* case fragment in the procedure */ if (foundcase==0) { isforward=0; nextfrag=(int)PI->procfrag; while (nextfrag!=0) { curfrag=(struct fragfmt*)(nextfrag); if ((curfrag->props&DSWITCHCASEFRAG)!=0) { finalfrag=(struct fragfmt*)(curfrag->final); AddPredFrag(0,isforward,1,finalfrag,frag); } if (nextfrag==(int)frag) isforward=1; nextfrag=(int)curfrag->nextfrag; } } } } /*GenPredecessors*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ /***/ /********************************************************************************/ /** PEEPHOLE OPTIMISATION SECTION **/ /********************************************************************************/ /***/ static void PerformIfThenOpt(struct fragfmt *frag) { /********************************************************************************/ /** Performs if/then optimisation specific to annulled delay slot machines. **/ /** Looks for statements of the sort "if (cond) instr" where instr is a **/ /** single machine instruction not including calls or branches. **/ /** Rearranges code so that cond is reversed and "instr" is in the delay slot **/ /** and annulled when the branch is untaken. 
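Schematically, and with purely illustrative mnemonics, 'branch-if-not-cond L1; nop; add r1,r1,1; L1:' becomes 'branch-if-cond,a L1; add r1,r1,1; L1:', the add now sitting in the annulled delay slot so that it only takes effect when cond holds and the branch is taken.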
**/ /********************************************************************************/ struct fragfmt *thenfrag,*nextfrag,*tempfrag; struct instrfmt *theninstr,*tempinstr,*instr; int junk,i; /* check that the fragment ends in a conditional branch */ if ((frag->labref==0)||(frag->labrefinstr==NULL)||((frag->props&FALLSTHROUGH)==0)||(frag->nextfrag==0) ||((frag->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)) return ; /* find the next label or instruction - this may be a then part */ thenfrag=frag->nextfrag; while ((thenfrag->nextfrag!=0)&&(thenfrag->labindex==0)&&(thenfrag->final!=thenfrag)) { thenfrag=thenfrag->nextfrag; } if (thenfrag->final!=thenfrag) return ; if ((thenfrag->labindex!=0)||(thenfrag->nextfrag==0)) return ; if ((thenfrag->uses!=1)||((thenfrag->props&FALLENINTO)==0)||((thenfrag->props&FALLSTHROUGH)==0)||((thenfrag ->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)) return ; /* now check that thenfrag has only one instruction left in it and */ /* that the instruction is suitable for putting into a delay slot */ i=(int)BMachineInstr(thenfrag->firstinstr,NEXTCHAIN); if (i==0) return ; theninstr=(struct instrfmt*)(i); if ((theninstr->props&(BRANCH|JUMPINSTR))!=0) return ; if ((theninstr->privprops&SYMBOLICACCESS)!=0) return ; if ((theninstr->privprops&NONANNULINSTR)!=0) return ; i=(int)BMachineInstr(theninstr->nextinstr,NEXTCHAIN); if (i!=0) return ; /* find the next fragment and check it has the right label and props */ nextfrag=thenfrag->nextfrag; while ((nextfrag->nextfrag!=0)&&(nextfrag->labindex==0)&&(nextfrag->final!=nextfrag)) { nextfrag=nextfrag->nextfrag; } if (nextfrag->final!=nextfrag) return ; if ((nextfrag->labindex!=frag->labref)||((nextfrag->props&FALLENINTO)==0)) return ; /* now check out the branch for suitability */ instr=frag->labrefinstr; if (((instr->props&DELAYSLOTEXISTS)!=0)&&((instr->props&DELAYSLOTUNUSED)==0)) return ; /* check that the architecture has annullable delay slots */ if (ARCHHasAnnullableSlots()==0) return ; /* go ahead and optimise the if/then construct */ if ((instr->props&DELAYSLOTEXISTS)!=0) { tempinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); if (tempinstr==NULL) return ; BDeleteInstr(tempinstr,1); } BMoveAfter(thenfrag,frag,theninstr,instr); theninstr->props|=DELAYSLOTINSTR; nextfrag=thenfrag->nextfrag; instr->props&=~(DELAYSLOTUNUSED|ANNULLEDTAKEN); instr->props|=DELAYSLOTEXISTS|ANNULLED; thenfrag->uses=0; nextfrag=thenfrag->nextfrag; thenfrag->rin=0; thenfrag->rout=0; #if(ISERFREQUIRED!=0) thenfrag->fin=0; thenfrag->fout=0; #if(ISERFREQUIRED==2) thenfrag->din=0; thenfrag->dout=0; #endif #if(ISCRFREQUIRED!=0) thenfrag->cin=0; thenfrag->cout=0; #endif; #endif; thenfrag->preds=0; tempfrag=thenfrag; do { tempfrag->final=nextfrag->final; if (tempfrag->prevfrag==0) break ; tempfrag=(struct fragfmt*)(tempfrag->prevfrag); } while (!(tempfrag->final!=thenfrag)) ; if ((ValidPreds!=0)&&(ValidLive==RWLIVENESSINFO)) { BUpdateLiveRegs(frag); } else { ValidLive=NOLIVENESSINFO; } /* update the GIS issue property information */ if (instr->bubbles!=0) { theninstr->issueprops=theninstr->issueprops&(~(TPCASCADED|TPQUEUED)); theninstr->issueprops=theninstr->issueprops|TPIGSTART; theninstr->bubbles=1; instr->bubbles=BIITOPT; if ((instr->issueprops&TPISCONTROL)!=0) { instr->issueprops=instr->issueprops&(~((TPIFTHEN|TPIFTHENELSE)|TPITECOLLAPSE)); } } /* invert the branch condition */ junk=ARCHInvertBranch(instr,0,0); /* print tracing information if selected */ if ((TraceEnab!=0)&&(FragReport!=0)) { printf("\n%s%s\nPERFORMED IF/THEN OPTIMISATION AT 
%8x\n",bannerline,bannerline,(int)instr); } } /*PerformIfThenOpt*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PerformIfThenElseOpt(struct fragfmt *frag,int prelim) { /********************************************************************************/ /** Performs if/then/else optimisation specific to annulled delay slot **/ /** machines. Looks for statements of the sort "if (cond) instrs else instr" **/ /** where instr is a single machine instruction not including calls or **/ /** branches. Rearranges code so that "instr" is in the delay slot and **/ /** annulled when the branch is untaken. If prelim is non-zero then the delay **/ /** slot owner is only marked as suitable for the optimisation to prevent GIS **/ /** from filling the slot. **/ /********************************************************************************/ struct fragfmt *nextfrag,*mergefrag,*thenfrag,*destfrag,*elsefrag; struct instrfmt *elseinstr,*branchinstr,*tempinstr,*instr; struct lrecfmt *lrec; int i; /* check that the fragment ends in a conditional branch */ if ((frag->labref==0)||(frag->labrefinstr==NULL)||((frag->props&FALLSTHROUGH)==0)||(frag->nextfrag==0) ||((frag->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)) return ; /* now check out the branch for suitability */ instr=frag->labrefinstr; if (((instr->props&DELAYSLOTEXISTS)!=0)&&((instr->props&DELAYSLOTUNUSED)==0)) return ; /* find the destination of the branch and check that it has a single */ /* instruction in its fragment */ lrec=BLabRecAd(frag->labref); elsefrag=lrec->frag; elsefrag=(struct fragfmt*)(elsefrag->final); if (((elsefrag->props&FALLENINTO)!=0)||(elsefrag->uses!=1)||((elsefrag->props&(USERASSEMFRAG|NOOPTIMFRAG ))!=0)||(elsefrag->nextfrag==0)) return ; i=(int)BMachineInstr(elsefrag->firstinstr,NEXTCHAIN); if (i==0) return ; elseinstr=(struct instrfmt*)(i); if ((elseinstr->props&(BRANCH|JUMPINSTR))!=0) return ; if ((elseinstr->privprops&SYMBOLICACCESS)!=0) return ; if ((elseinstr->privprops&NONANNULINSTR)!=0) return ; i=(int)BMachineInstr(elseinstr->nextinstr,NEXTCHAIN); if (i!=0) return ; /* find the next fragment and check it has the right label and */ /* properties - this should be where the if and else parts merge */ mergefrag=elsefrag->nextfrag; while ((mergefrag->nextfrag!=0)&&(mergefrag->labindex==0)&&(((mergefrag->props&FALLENINTO)!=0)||(mergefrag ->final!=mergefrag))) { if (BMachineInstr(mergefrag->firstinstr,NEXTCHAIN)!=NULL) { return ; } mergefrag=mergefrag->nextfrag; } if ((mergefrag->final!=mergefrag)||((mergefrag->props&FALLENINTO)==0)) return ; /* find the next label or instruction - this may be a then part */ thenfrag=frag->nextfrag; while ((thenfrag->nextfrag!=0)&&(thenfrag->labindex==0)&&(thenfrag->final!=thenfrag)) { thenfrag=thenfrag->nextfrag; } if (thenfrag->final!=thenfrag) return ; if ((thenfrag->uses!=1)||((thenfrag->props&FALLENINTO)==0)||((thenfrag->props&FALLSTHROUGH)!=0)||((thenfrag ->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)||(thenfrag->labindex!=0)||(thenfrag->nextfrag==0)||(thenfrag ->labrefinstr==NULL)||(thenfrag->labref==0)) return ; branchinstr=thenfrag->labrefinstr; if (((branchinstr->props&BRANCH)==0)||((branchinstr->props&CONDITIONALBRANCH)!=0)) return ; lrec=BLabRecAd(thenfrag->labref); destfrag=lrec->frag; if (destfrag->final!=mergefrag->final) return ; /* find the next fragment check it has the right label and properties */ nextfrag=thenfrag->nextfrag; while ((nextfrag->nextfrag!=0)&&(nextfrag->labindex==0)&& 
(BMachineInstr(nextfrag->firstinstr,NEXTCHAIN)==NULL)&&((nextfrag->props&FALLENINTO)==0)&&(nextfrag->final!=nextfrag)) { nextfrag=nextfrag->nextfrag; } if (nextfrag->final!=nextfrag) return ; if ((nextfrag->labindex!=frag->labref)||((nextfrag->props&FALLENINTO)!=0)) return ; /* check that the architecture has annullable delay slots */ if (ARCHHasAnnullableSlots()==0) return ; /* if we are doing a preliminary pass then just mark the construct */ if (prelim!=0) { instr->props|=NOGISDSFILL; branchinstr->props|=NOGISDSFILL; elseinstr->props|=NOSPECEXEC; return ; } /* go ahead and optimise the if/then/else construct */ if ((instr->props&DELAYSLOTEXISTS)!=0) { tempinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); if (tempinstr==NULL) return ; BDeleteInstr(tempinstr,1); } BMoveAfter(elsefrag,frag,elseinstr,instr); elseinstr->props|=DELAYSLOTINSTR; instr->props&=~(DELAYSLOTUNUSED|ANNULLEDTAKEN); instr->props|=DELAYSLOTEXISTS|ANNULLED; instr=thenfrag->labrefinstr; if ((instr->props&DELAYSLOTEXISTS)!=0) { if ((instr->props&DELAYSLOTUNUSED)!=0) { BDeleteInstr(BMachineInstr(instr->nextinstr,NEXTCHAIN),1); } else { tempinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); tempinstr->props&=~DELAYSLOTINSTR; } } BDeleteInstr(instr,1); thenfrag->props|=FALLSTHROUGH; thenfrag->labref=0; thenfrag->labrefinstr=NULL; thenfrag=thenfrag->nextfrag; thenfrag=(struct fragfmt*)(thenfrag->final); while ((int)thenfrag!=(int)elsefrag) { thenfrag->props|=FALLSTHROUGH|FALLENINTO; thenfrag=thenfrag->nextfrag; thenfrag=(struct fragfmt*)(thenfrag->final); } elsefrag->props|=FALLENINTO; mergefrag->uses=mergefrag->uses-1; if (mergefrag->uses==1) mergefrag->labindex=0; if ((ValidPreds!=0)&&(ValidLive==RWLIVENESSINFO)) { BUpdateLiveRegs(frag); BUpdateLiveRegs(thenfrag); BUpdateLiveRegs(elsefrag); } else { ValidLive=NOLIVENESSINFO; } /* update the GIS issue property information */ if (elseinstr->bubbles!=0) { elseinstr->issueprops=elseinstr->issueprops&(~(TPCASCADED|TPQUEUED)); elseinstr->issueprops=elseinstr->issueprops|TPIGSTART; elseinstr->bubbles=1; instr->bubbles=BIITEOPT; if ((instr->issueprops&TPISCONTROL)!=0) { instr->issueprops=instr->issueprops&(~((TPIFTHEN|TPIFTHENELSE)|TPITECOLLAPSE)); } } /* print tracing information if selected */ if ((TraceEnab!=0)&&(FragReport!=0)) { printf("\n%s%s\nPERFORMED IF/THEN/ELSE OPTIMISATION AT %8x\n",bannerline,bannerline,(int)&frag->labrefinstr); } } /*PerformIfThenElseOpt*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PerformUncondBranchOpt(struct fragfmt *frag,int prelim) { /********************************************************************************/ /** Performs an unconditional branch optimisation specific to annulled delay **/ /** slot machines. Looks for statements of the sort "if (cond) instr branch" **/ /** where instr is a single machine instruction not including calls or **/ /** branches and branch is an unconditional branch. Rearranges code so that **/ /** instr is in the delay slot of the conditional branch. If prelim is **/ /** non-zero then the delay slot owner is only marked as suitable for the **/ /** optimisation to prevent GIS from filling the slot. 
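For example (an illustrative SPARC-like sequence, not taken from the translator's output; the mnemonics, labels and registers are assumed):
    before:   be     MERGE           ! skip when the condition fails
              nop                    ! empty delay slot
              add    %r2,%r3,%r2     ! the single guarded instruction
              ba     LOOP            ! unconditional branch
              nop
       MERGE: ...
    after:    bne,a  LOOP            ! inverted and retargeted, annul bit set
              add    %r2,%r3,%r2     ! in the delay slot, annulled when untaken
       MERGE: ...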
**/ /********************************************************************************/ struct fragfmt *nextfrag,*mergefrag,*destfrag; struct instrfmt *slotinstr,*uncondbranch,*instr,*tempinstr; struct lrecfmt *lrec; struct preddescfmt *pred,*temppred; int junk,prevdesc,nextdesc,i; /* check that the fragment ends in a conditional branch */ if ((frag->labref==0)||(frag->labrefinstr==NULL)||((frag->props&FALLSTHROUGH)==0)||(frag->nextfrag==0) ||((frag->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)) return ; /* now check out the branch for suitability */ instr=frag->labrefinstr; if (((instr->props&DELAYSLOTEXISTS)!=0)&&((instr->props&DELAYSLOTUNUSED)==0)) return ; /* find the next fragment and check that it has a single */ /* instruction and an unconditional branch in its fragment */ nextfrag=frag->nextfrag; while ((nextfrag->nextfrag!=0)&&(nextfrag->labindex==0)&& (BMachineInstr(nextfrag->firstinstr,NEXTCHAIN)==NULL)&&(((nextfrag->props&FALLENINTO)!=0)||(nextfrag->final!=nextfrag))) { nextfrag=nextfrag->nextfrag; } if ((nextfrag->final!=nextfrag)||(nextfrag->labindex!=0)||(nextfrag->uses!=1)||((nextfrag->props &FALLENINTO)==0)||(nextfrag->labref==0)||((nextfrag->props&USERASSEMFRAG)!=0)||((nextfrag->props&NOOPTIMFRAG )!=0)||(nextfrag->nextfrag==0)||((nextfrag->props&FALLSTHROUGH)!=0)) return ; i=(int)BMachineInstr(nextfrag->firstinstr,NEXTCHAIN); if (i==0) return ; slotinstr=(struct instrfmt*)(i); if ((slotinstr->props&(BRANCH|JUMPINSTR))!=0) return ; if ((slotinstr->privprops&SYMBOLICACCESS)!=0) return ; if ((slotinstr->privprops&NONANNULINSTR)!=0) return ; i=(int)BMachineInstr(slotinstr->nextinstr,NEXTCHAIN); if (i==0) return ; uncondbranch=(struct instrfmt*)(i); if (((uncondbranch->props&BRANCH)==0)||((uncondbranch->props&CONDITIONALBRANCH)!=0)||(((uncondbranch ->props&DELAYSLOTEXISTS)!=0)&&((uncondbranch->props&DELAYSLOTUNUSED)==0))) return ; /* find the next fragment and check it has the right label and */ /* properties - this should be the destination of the conditional branch */ lrec=BLabRecAd(frag->labref); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); mergefrag=nextfrag->nextfrag; mergefrag=(struct fragfmt*)(mergefrag->final); if ((int)mergefrag!=(int)destfrag) return ; /* check that the architecture has annullable delay slots */ if (ARCHHasAnnullableSlots()==0) return ; /* if we are doing a preliminary pass then just mark the construct */ if (prelim!=0) { instr->props|=NOGISDSFILL; uncondbranch->props|=NOGISDSFILL; slotinstr->props|=NOSPECEXEC; return ; } /* go ahead and perform the optimisation on the construct */ if ((instr->props&DELAYSLOTEXISTS)!=0) { tempinstr=BMachineInstr(instr->nextinstr,NEXTCHAIN); if (tempinstr==NULL) return ; BDeleteInstr(tempinstr,1); } BMoveAfter(nextfrag,frag,slotinstr,instr); lrec=BLabRecAd(nextfrag->labref); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); slotinstr->props|=DELAYSLOTINSTR; instr->props&=~(DELAYSLOTUNUSED|ANNULLEDTAKEN); instr->props|=DELAYSLOTEXISTS|ANNULLED; frag->labref=nextfrag->labref; instr->u1.immval=frag->labref; junk=ARCHInvertBranch(instr,0,0); if ((uncondbranch->props&DELAYSLOTEXISTS)!=0) { BDeleteInstr(BMachineInstr(uncondbranch->nextinstr,NEXTCHAIN),1); } BDeleteInstr(uncondbranch,1); nextfrag->props|=FALLSTHROUGH; mergefrag->props|=FALLENINTO; if (mergefrag->uses==1) mergefrag->labindex=0; nextfrag->labref=0; nextfrag->labrefinstr=NULL; /* Nextfrag is now empty */ nextfrag->uses=0; nextfrag->final=mergefrag->final; if (ValidPreds!=0) { prevdesc=0; nextdesc=mergefrag->preds; while 
(nextdesc!=0) { pred=(struct preddescfmt*)(nextdesc); if (pred->frag==(int)frag) { if (prevdesc==0) { mergefrag->preds=pred->next; } else { temppred=(struct preddescfmt*)(prevdesc); temppred->next=pred->next; } break ; } prevdesc=nextdesc; nextdesc=pred->next; } AddPredFrag((int)PI->procfrag,0,0,destfrag,frag); } if ((ValidPreds!=0)&&(ValidLive==RWLIVENESSINFO)) { BReconvergeLiveness(); } else { ValidLive=NOLIVENESSINFO; } /* update the GIS issue property information */ if (slotinstr->bubbles!=0) { slotinstr->issueprops=slotinstr->issueprops&(~(TPCASCADED|TPQUEUED)); slotinstr->issueprops=slotinstr->issueprops|TPIGSTART; slotinstr->bubbles=1; instr->bubbles=BIUNCONDOPT; if ((instr->issueprops&TPISCONTROL)!=0) { instr->issueprops=instr->issueprops&(~((TPIFTHEN|TPIFTHENELSE)|TPITECOLLAPSE)); } } /* print tracing information if selected */ if ((TraceEnab!=0)&&(FragReport!=0)) { printf("\n%s%s\nPERFORMED UNCOND BRANCH OPTIMISATION AT %8x\n",bannerline,bannerline,(int)instr); } } /*PerformUncondBranchOpt*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PerformBranchOverOpt(struct fragfmt *frag,int prelim) { /********************************************************************************/ /** Performs a branch over optimisation. Looks for conditional branches over **/ /** unconditional branches and rerranges the code to delete the unconditonal **/ /** branch and make an inverted form of the conditional branch go to its **/ /** destination. If prelim is non-zero then the delay slot owner is only **/ /** marked as suitable for the optimisation to prevent GIS from filling the **/ /** slot. **/ /********************************************************************************/ struct fragfmt *nextfrag,*mergefrag,*destfrag; struct instrfmt *uncondbranch,*instr; struct lrecfmt *lrec; struct preddescfmt *pred,*temppred; int junk,prevdesc,nextdesc,i; /* check that the fragment ends in a conditional branch */ if ((frag->labref==0)||(frag->labrefinstr==NULL)||((frag->props&FALLSTHROUGH)==0)||(frag->nextfrag==0) ||((frag->props&(USERASSEMFRAG|NOOPTIMFRAG))!=0)) return ; /* now check out the branch for suitability */ instr=frag->labrefinstr; if (((instr->props&DELAYSLOTEXISTS)!=0)&&((instr->props&DELAYSLOTUNUSED)==0)) return ; /* find the next fragment and check it has just an unconditional branch */ nextfrag=frag->nextfrag; while ((nextfrag->nextfrag!=0)&&(nextfrag->labindex==0)&& (BMachineInstr(nextfrag->firstinstr,NEXTCHAIN)==NULL)&& (((nextfrag->props&FALLENINTO)!=0)||(nextfrag->final!=nextfrag))) { nextfrag=nextfrag->nextfrag; } if ((nextfrag->final!=nextfrag)||(nextfrag->labindex!=0)||(nextfrag->uses!=1)||((nextfrag->props &FALLENINTO)==0)||(nextfrag->labref==0)||((nextfrag->props&USERASSEMFRAG)!=0)||((nextfrag->props&NOOPTIMFRAG )!=0)||(nextfrag->nextfrag==0)||((nextfrag->props&FALLSTHROUGH)!=0)) return ; i=(int)BMachineInstr(nextfrag->firstinstr,NEXTCHAIN); if (i==0) return ; uncondbranch=(struct instrfmt*)(i); if (((uncondbranch->props&BRANCH)==0)||((uncondbranch->props&CONDITIONALBRANCH)!=0)||(((uncondbranch ->props&DELAYSLOTEXISTS)!=0)&&((uncondbranch->props&DELAYSLOTUNUSED)==0))) return ; /* find the next fragment and check it has the right label and */ /* properties - this should be the destination of the conditional branch */ lrec=BLabRecAd(frag->labref); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); mergefrag=nextfrag->nextfrag; mergefrag=(struct fragfmt*)(mergefrag->final); if 
((int)mergefrag!=(int)destfrag) return ; /* if we are doing a preliminary pass then just mark the construct */ if (prelim!=0) { instr->props|=NOGISDSFILL; uncondbranch->props|=NOGISDSFILL; return ; } /* go ahead and perform the optimisation on the construct */ lrec=BLabRecAd(nextfrag->labref); destfrag=lrec->frag; destfrag=(struct fragfmt*)(destfrag->final); frag->labref=nextfrag->labref; instr->u1.immval=frag->labref; instr->bubbles=BIOVEROPT; if ((instr->issueprops&TPISCONTROL)!=0) { instr->issueprops=instr->issueprops&(~((TPIFTHEN|TPIFTHENELSE)|TPITECOLLAPSE)); } junk=ARCHInvertBranch(instr,0,0); if ((uncondbranch->props&DELAYSLOTEXISTS)!=0) { BDeleteInstr(BMachineInstr(uncondbranch->nextinstr,NEXTCHAIN),1); } BDeleteInstr(uncondbranch,1); nextfrag->props|=FALLSTHROUGH; mergefrag->props|=FALLENINTO; if (mergefrag->uses==1) mergefrag->labindex=0; nextfrag->labref=0; nextfrag->labrefinstr=NULL; if (ValidPreds!=0) { prevdesc=0; nextdesc=mergefrag->preds; while (nextdesc!=0) { pred=(struct preddescfmt*)(nextdesc); if (pred->frag==(int)frag) { if (prevdesc==0) { mergefrag->preds=pred->next; } else { temppred=(struct preddescfmt*)(prevdesc); temppred->next=pred->next; } break ; } prevdesc=nextdesc; nextdesc=pred->next; } AddPredFrag((int)PI->procfrag,0,0,destfrag,frag); } if ((ValidPreds!=0)&&(ValidLive==RWLIVENESSINFO)) { BUpdateLiveRegs(frag); BUpdateLiveRegs(nextfrag); } else { ValidLive=NOLIVENESSINFO; } /* print tracing information if selected */ if ((TraceEnab!=0)&&(FragReport!=0)) { printf("\n%s%s\nPERFORMED BRANCH OVER OPTIMISATION AT %8x\n",bannerline,bannerline,(int)instr); } } /*PerformBranchOverOpt*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ /***/ /********************************************************************************/ /** BASIC BLOCK INSTRUCTION SCHEDULER SECTION **/ /********************************************************************************/ /***/ /** This is a generic basic block scheduler. It has the capability */ /** of generating schedules for both superscalar machines which may */ /** issue up to 2 instructions per clock cycle and for more conventional */ /** pipelined serial architectures. */ /***/ /** When an instruction has been selected for execution 'now' certain information*/ /** about its use of resources and its time to completion must be retained in*/ /** order to guide the choice of the next instruction to execute. */ #define dummy3forcb 0 /***/ /** This information is: */ /***/ /** (a) the number of clocks for which the instruction's execution unit is */ /** busy (ie can accept no further instructions) */ /***/ /** (b) the number of clocks until the instruction's result is available (thus*/ /** allowing a subsequent instruction dependent on the result to be */ /** considered for execution) */ /***/ /** (c) the priority of the instruction's execution unit during arbitration for*/ /** writeback slots to make the result available */ /***/ /** (d) the register(s) into which the result will be placed */ /***/ /** The information is managed by means of an array of instruction completion*/ /** records containing the following fields (not all fields will be present */ /** on some architectures): */ #define dummy4forcb 0 /***/ /** unitbusy: a bit-map of execution units which are busy (and thus unable to*/ /** except new instructions) during the given cycle. Only */ /** multi-cycle execution units may be busy in this way. 
*/ /***/ /** when an instruction is selected for execution at clock N the */ /** unit fields of array elements N to (N + blockage clocks - 1) */ /** inclusive will be augmented by the appropriate unit identifier.*/ /***/ /** gregbusy: a bit-map of the integer registers which are busy during */ /** the given clock cycle. Any instruction which attempts to */ /** reference or define these registers will be stalled if it */ /** was to be issued on the given cycle. Note that a register is */ /** not considered to be busy during its writeback since it may */ /** then be referenced immediately via a feed-forward bus. */ /***/ /** when an instruction is selected for execution at clock N the */ /** gregbusy fields in elements N to (N + latency clocks - 1) */ /** inclusive will be augmented by the instruction's destination */ /** integer registers. */ /***/ /** fregbusy: a bit-map of the float registers which are busy during */ /** the given clock cycle. Any instruction which attempts to */ /** reference or define these registers will be stalled if it */ /** was to be issued on the given cycle. Note that a register is */ /** not considered to be busy during its writeback since it may */ /** then be referenced immediately via a feed-forward bus. */ /***/ /** when an instruction is selected for execution at clock N the */ /** fregbusy fields in elements N to (N + latency clocks - 1) */ /** inclusive will be augmented by the instruction's destination */ /** float registers. */ #define dummy5forcb 0 /***/ /** cregbusy: a bit-map of the control registers which are busy during */ /** the given clock cycle. Any instruction which attempts to */ /** reference or define these registers will be stalled if it */ /** was to be issued on the given cycle. Control registers are */ /** not considered to require writeback slots. */ /***/ /** when an instruction is selected for execution at clock N the */ /** cregbusy fields in elements N to (N + latency clocks - 1) */ /** inclusive will be augmented by the instruction's destination */ /** control registers. */ /***/ /** wbsuse : this is an array of elements giving information about the */ /** usage of the writeback-slots available on the given */ /** clock cycle. On a serial machine there is one writeback-slot */ /** per clock cycle whereas 2 are considered to be available on */ /** a superscalar machine: */ /***/ /** wbunit : the number of the execution unit granted a */ /** writeback-slot for the given cycle. This field is */ /** 0 if no booking has been made. */ /** wbstall: the stall factor associated with the instruction */ /** requiring the writeback-slot. This is used to prioritise*/ /** the bookings for writeback-slots and thus prevent an */ /** important result from a low priority execution unit */ /** being unreasonably delayed by higher priority units. */ /** This field is only valid if wbunit is non-zero. */ /** wbgreg : if wbunit is non-zero then this gives the integer */ /** registers written back during the writeback-slot. */ /** wbfreg : if wbunit is non-zero then this gives the float */ /** registers written back during the writeback-slot. */ /***/ #define dummy6forcb 0 /** Note that the gregbusy, fregbusy, cregbusy and unitbusy fields of a */ /** completion array entry may be usurped by an instruction of higher execution*/ /** unit priority which completes in the same clock. In this case these fields*/ /** are moved to the next appropriate entry, possibly causing a ripple effect*/ /** down the array. 
*/ /***/ /** The completion array need only be large enough to accomodate the total */ /** execution time of the longest instruction plus the maximum time the */ /** writeback can be delayed. */ /***/ /** The letters CP in the following data items and procedure names are meant */ /** to indicate 'completion' or 'completion array'. */ /* The maximum number of instructions that we look ahead by when searching */ /* for suitable candidates for scheduling. */ #define MAXINSPECTIONS 30 /* The stall given to an instruction with no use of its results in the */ /* same fragment. */ #define NOSTALL (-99999) /* The following bit significant values are used in the properties field */ /* of the scheduler descriptor records. */ #define SDGENERICJUMP 1 /* any jump or call instruction */ #define SDCALLNDS 2 /* call without delay slot instruction */ #define SDCALLDS 4 /* call with a delay slot instruction */ #define SDJUMPNDS 8 /* jump without delay slot instruction */ #define SDJUMPDS 16 /* jump with a delay slot instruction */ #define SDHASMEMDEP 32 /* set if descriptor has a mem dependency */ #define SDSCHRESTRICT 64 /* set if instruction restricts scheduling*/ #define SDMDINACTIVE 128 /* set if memory dependency is inactive */ #define SDHASDS 256 /* set if instr has a delay slot */ #define SDSIMPLEINSTR 512 /* set if instr is simple one cycle exec */ #define SDDEPENDEDON 1024 /* set if instr access depended upon */ /* There is a schedule descriptor for each of the instructions in a particular*/ /* fragment. Much of the descriptor information is duplicated in the */ /* scheduler tables or in the instruction records themselves. By */ /* maintaining all the relevant information in a single record per */ /* instruction the core of the scheduler algorithm is speeded up. 
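*/
/* As an illustration of the completion-array model described above (this     */
/* sketch is not part of the translator and is guarded out of the build; the  */
/* XCP names are invented for the example), issuing an instruction at the     */
/* current clock marks its execution unit busy for 'blockage' cycles and its  */
/* destination registers busy for 'latency' cycles, with all indices wrapping */
/* round the array:                                                           */
#if(0)
#define XCPMAX 15                      /* example completion array size less 1 */
struct XCPentry{ int unitbusy; int gregbusy; };
static struct XCPentry XCP [XCPMAX+1]; /* cut-down scoreboard: units and GRF only */
static int XCPindex;                   /* entry for the current clock */
static void XIssue(int unit,int gdef,int blockage,int latency)
{ int i;
  for (i=0; i<=blockage-1; i++) XCP [(XCPindex+i)&XCPMAX].unitbusy|=(1<<unit);
  for (i=0; i<=latency-1; i++) XCP [(XCPindex+i)&XCPMAX].gregbusy|=gdef;
}
static void XAdvanceClock()            /* one beat: recycle the entry being left */
{ XCP [XCPindex].unitbusy=0; XCP [XCPindex].gregbusy=0; XCPindex=(XCPindex+1)&XCPMAX; }
#endif
/* The schedule descriptor record used by the scheduler proper is defined below.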
*/ typedef struct SchDescFmt{ int prev; /* pointer to the previous schedule descriptor */ int next; /* pointer to the next schedule descriptor */ int prevcand; int nextcand; /* pointer to previous descriptor in cand group */ struct SchDescFmt *reguserA; /* pointer to the descriptor of the first */ /* succeeding instruction which uses the register*/ /* results of this instruction, or 0 if the */ /* results are not used in the same fragment */ struct SchDescFmt *reguserB; /* as reguserA but second user of results */ struct SchDescFmt *regcollision;/* pointer to the descriptor of the first */ /* succeeding instruction which redefines */ /* registers referenced by this instruction */ struct instrfmt *instr; /* pointer to the instruction record itself */ int props; /* properties of the instruction (defined above)*/ int gref; /* integer regs referenced by the instruction */ int gdef; /* integer regs defined by the instruction */ int fref; /* float regs referenced by the instruction */ int fdef; /* float regs defined by the instruction */ int cref; /* control regs referenced by the instruction */ int cdef; /* control regs defined by the instruction */ int dref; /* float regs (>31) referenced by the instruction*/ int ddef; /* float regs (>31) defined by the instruction */ int stallfactor; /* estimated stall factor for the instruction */ int unit; /* execution unit to which the instr is issued */ int blockage; /* blockage factor on the exec unit for the instr*/ unsigned int latency; /* latency of the instruction */ } SchDescFmt; #define SIZEOFSCHDESCFMT sizeof(SchDescFmt) /* record format for a single writeback-slot descriptor */ #if(ISERFREQUIRED==0) struct WBSfmt{ int wbunit; int wbstall; int wbgreg; }; #define SIZEOFWBSFMT 12 #elif(ISERFREQUIRED==1) struct WBSfmt{ int wbunit; int wbstall; int wbgreg; int wbfreg; }; #define SIZEOFWBSFMT 16 #else struct WBSfmt{ int wbunit; int wbstall; int wbgreg; int wbfreg; int wbdreg; }; #define SIZEOFWBSFMT 20 #endif; /* Work out number of writeback-slots available per clock cycle */ #if(ISSUPERSCALAR==0) #define NUMWBS 1 /* one writeback-slot available per clock */ #else #define NUMWBS 2 /* two writeback-slots available per clock */ #endif; /* record format of completion info for a single clock cycle */ #if(ISERFREQUIRED==0) struct CPfmt{ int unitbusy; int gregbusy; struct WBSfmt wbsuse [NUMWBS]; }; #define SIZEOFCPFMT (8+(SIZEOFWBSFMT*NUMWBS)) #elif(ISERFREQUIRED==2) struct CPfmt{ int unitbusy; int gregbusy; int fregbusy; int cregbusy; int dregbusy; struct WBSfmt wbsuse [NUMWBS]; }; #define SIZEOFCPFMT (20+(SIZEOFWBSFMT*NUMWBS)) #elif(ISCRFREQUIRED==0) struct CPfmt{ int unitbusy; int gregbusy; int fregbusy; struct WBSfmt wbsuse [NUMWBS]; }; #define SIZEOFCPFMT (12+(SIZEOFWBSFMT*NUMWBS)) #else struct CPfmt{ int unitbusy; int gregbusy; int fregbusy; int cregbusy; struct WBSfmt wbsuse [NUMWBS]; }; #define SIZEOFCPFMT (16+(SIZEOFWBSFMT*NUMWBS)) #endif; /* The current completion entry (scoreboard) array */ static struct CPfmt CPinfo [CPmax+1]; static int SchDescInfoBase; /* base of scheduler descriptor records */ static int CPindex; /* holds index of current CPinfo entry */ static void PrintSchDesc(int Desc,int SuppChainInfo) { /******************************************************************************/ /** Prints trace information for a schedule descriptor. Desc is a pointer to **/ /** the descriptor and if SuppChainInfo is non-zero then no descriptor chain **/ /** information is printed. 
**/ /******************************************************************************/ struct SchDescFmt *SchDesc; struct instrfmt *instr; int ca; /* print the instruction associated with the descriptor */ SchDesc=(struct SchDescFmt*)(Desc); instr=SchDesc->instr; printf("\n"); ca=-1; ARCHDisasInstr(instr,&ca); /* print descriptor chain information if required */ if (SuppChainInfo==0) { printf("PREV:%8x NEXT:%8x PREVCAND:%8x NEXTCAND:%8x\n",SchDesc->prev, SchDesc->next,SchDesc->prevcand,SchDesc->nextcand); } /* print descriptor properties */ printf("%s BLK:%1d LAT:%1d",ExecNames [SchDesc->unit],SchDesc->blockage,SchDesc->latency); if (SchDesc->stallfactor!=NOSTALL) { printf(" SF:%d",SchDesc->stallfactor); } printf(" PROP:0x%X",SchDesc->props); if ((SchDesc->gref!=0)||(SchDesc->fref!=0)||(SchDesc->cref!=0)||(SchDesc->dref!=0)) { printf(" REF:"); ARCHPrintRegisters(SchDesc->gref,SchDesc->fref,SchDesc->cref,SchDesc->dref); } if ((SchDesc->gdef!=0)||(SchDesc->fdef!=0)||(SchDesc->cdef!=0)||(SchDesc->ddef!=0)) { printf(" DEF:"); ARCHPrintRegisters(SchDesc->gdef,SchDesc->fdef,SchDesc->cdef,SchDesc->ddef); } if (SchDesc->reguserA!=NULL) { printf(" RUA:%8x",(unsigned int)SchDesc->reguserA->instr); } if (SchDesc->reguserB!=NULL) { printf(" RUB:%8x",(unsigned)SchDesc->reguserB->instr); } if (SchDesc->regcollision!=NULL) { printf(" RC:%8x",(unsigned)SchDesc->regcollision->instr); } printf("\n"); } /*PrintSchDesc*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PrintCandidateList(int StartDesc) { /******************************************************************************/ /** Prints a list of candidate descriptors starting with the descriptor **/ /** pointed to by StartDesc. Also checks the consistency of the candidate **/ /** descriptor chaining. **/ /******************************************************************************/ struct SchDescFmt *SchDesc; int CurSch,LastSch; CurSch=StartDesc; while (CurSch!=0) { SchDesc=(struct SchDescFmt*)(CurSch); if (((CurSch==StartDesc)&&(SchDesc->prevcand!=0))||((CurSch!=StartDesc)&&(SchDesc->prevcand!=LastSch ))) { BError("BROKEN CANDIDATE DESCRIPTOR CHAIN"); } LastSch=CurSch; PrintSchDesc(CurSch,1); CurSch=SchDesc->nextcand; } } /*PrintCandidateList*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void PrintDescriptorList(int StartDesc) { /******************************************************************************/ /** Prints a list of descriptors starting with the descriptor pointed to by **/ /** StartDesc. Also checks the consistency of the descriptor chaining. **/ /******************************************************************************/ struct SchDescFmt *SchDesc; int CurSch,LastSch; CurSch=StartDesc; while (CurSch!=0) { SchDesc=(struct SchDescFmt*)(CurSch); if (((CurSch==StartDesc)&&(SchDesc->prev!=0))||((CurSch!=StartDesc)&&(SchDesc->prev!=LastSch))) { printf("BROKEN DESCRIPTOR CHAIN\n"); break ; } LastSch=CurSch; PrintSchDesc(CurSch,1); CurSch=SchDesc->next; } } /*PrintDescriptorList*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. 
All Rights Reserved.*/ /**/ /***/ static void PrintScoreboard() { /******************************************************************************/ /** Prints the information currently in the scoreboard **/ /******************************************************************************/ struct CPfmt *cp; int EntryUsed,TopIndex,i,j,count; /* find the highest numbered clock with information associated with it */ TopIndex=CPindex; EntryUsed=0; do { TopIndex=(TopIndex-1)&CPmax; cp=&CPinfo [TopIndex]; if ((cp->unitbusy!=0)||(cp->gregbusy!=0)) EntryUsed=1; #if(ISERFREQUIRED!=0) if (cp->fregbusy!=0) EntryUsed=1; #if(ISERFREQUIRED==2) if (cp->dregbusy!=0) EntryUsed=1; #endif #if(ISCRFREQUIRED!=0) if (cp->cregbusy!=0) EntryUsed=1; #endif; #endif; for (i=1; i<=NUMWBS; i++) { if (cp->wbsuse [i-1].wbunit!=0) EntryUsed=1; } if (CPindex==TopIndex) EntryUsed=1; } while (!(EntryUsed!=0)) ; /* print the banner for the scoreboard printout */ printf("\n%s%s\n",bannerline,bannerline); /* print the significant clock cycles from the scoreboard */ i=CPindex; count=0; do { cp=&CPinfo [i]; printf(" %3d ",count); for (j=0; j<=MAXUNITNUM; j++) { if ((cp->unitbusy&(1<<j))!=0) printf("%s ",ExecNames [j]); } printf("%08x ",cp->gregbusy); #if(ISERFREQUIRED!=0) printf("%08x ",cp->fregbusy); #if(ISERFREQUIRED==2) printf("D%08x ",cp->dregbusy); #endif #if(ISCRFREQUIRED!=0) printf("%08x ",cp->cregbusy); #endif; #endif; for (j=1; j<=NUMWBS; j++) { if (cp->wbsuse [j-1].wbunit!=0) { printf(" %s ",ExecNames [cp->wbsuse [j-1].wbunit]); #if(ISERFREQUIRED==0) printf("%08x",cp->wbsuse [j-1].wbgreg); #else if (cp->wbsuse [j-1].wbgreg!=0) { printf("G%08x",cp->wbsuse [j-1].wbgreg); } else { printf("X%08x",cp->wbsuse [j-1].wbfreg); } #endif; } } printf("\n"); count++; i=(i+1)&CPmax; } while (!(((i-1)&CPmax)==TopIndex)) ; printf("\n"); } /*PrintScoreboard*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void GetMemoryDependency(struct fragfmt *frag,int StartDesc,int EndDesc,int *DepDesc,int *DefOverlap ) { /********************************************************************************/ /** This function is used to find memory access dependencies, which cannot be **/ /** calculated from the register reference and definition vectors. StartDesc **/ /** is a pointer to a memory access descriptor. The routine sets DepDesc to a **/ /** pointer to the closest preceding descriptor, searching back no further **/ /** than EndDesc, on which there is a memory dependency. If no such dependency **/ /** exists then DepDesc is set to 0. The given descriptor may not be **/ /** scheduled to precede its dependee in the instruction sequence. If there **/ /** is a dependency then DefOverlap is made non-zero if the overlap is **/ /** definite, as opposed to just a possibility. 
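**/
/* A minimal sketch of the ordering rule applied below (the X names are       */
/* invented for illustration and the volatile-load refinement is omitted):    */
/* a store must stay behind earlier loads and stores to possibly overlapping  */
/* locations, while a load only has to stay behind earlier stores.            */
#if(0)
#define XLOAD  1
#define XSTORE 2
/* non-zero if an access with properties 'props' must not be scheduled ahead  */
/* of an earlier access with properties 'prevprops'                           */
static int XMemOrderMatters(int props,int prevprops)
{ if ((props&XSTORE)!=0) return (prevprops&(XLOAD|XSTORE))!=0;
  return (prevprops&XSTORE)!=0;
}
#endif
/** The implementation below also honours VOLATILELOC loads and relies on BMemoryOverlap for the actual address comparison.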
**/ /********************************************************************************/ struct SchDescFmt *CurDesc,*SDesc; struct instrfmt *curinstr,*meminstr; int prevdesc,checkprops,checkvolload,overlap,searchend; /* find the end position of the memory dependency search */ CurDesc=(struct SchDescFmt*)(EndDesc); searchend=CurDesc->prev; /* get the actual instruction associated with the given descriptor */ SDesc=(struct SchDescFmt*)(StartDesc); meminstr=SDesc->instr; /* if the instruction is a store then we are looking for a preceeding */ /* load or store but if it is a load then we are only interested in a */ /* preceeding store */ checkvolload=0; if ((meminstr->props&STOREINSTR)!=0) { checkprops=LOADINSTR|STOREINSTR; } else { checkprops=STOREINSTR; if ((meminstr->privprops&VOLATILELOC)!=0) { checkvolload=1; } } /* search backwards through the descriptor chain to find a memory access */ prevdesc=SDesc->prev; while (prevdesc!=searchend) { /* get a pointer to the current instruction */ CurDesc=(struct SchDescFmt*)(prevdesc); curinstr=CurDesc->instr; /* do the memory overlap test to see if the original instruction */ /* is a dependent of the current instruction */ if (((curinstr->props&checkprops)!=0)||((checkvolload!=0)&&(((curinstr->privprops&VOLATILELOC)!=0)&&((curinstr ->props&LOADINSTR)!=0)))) { overlap=BMemoryOverlap((int)frag,(int)frag,meminstr,curinstr,-1,-1,1); if (overlap!=MONO) { if ((overlap==MOYES)||(overlap==MOEXACT)) *DefOverlap=1; else *DefOverlap=0; *DepDesc=prevdesc; return ; } } /* now examine the previous descriptor */ prevdesc=CurDesc->prev; } /* the given descriptor has no memory dependency */ *DepDesc=0; } /*GetMemoryDependency*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void InitCPinfo() { /********************************************************************************/ /** Initialise the instruction completion data items and reset the **/ /** simulator clock to zero. **/ /********************************************************************************/ int i,j; for (i=0; i<=CPmax; i++) { CPinfo [i].unitbusy=0; CPinfo [i].gregbusy=0; #if(ISERFREQUIRED!=0) CPinfo [i].fregbusy=0; #if(ISERFREQUIRED==2) CPinfo [i].dregbusy=0; #endif #if(ISCRFREQUIRED!=0) CPinfo [i].cregbusy=0; #endif; #endif; for (j=1; j<=NUMWBS; j++) { CPinfo [i].wbsuse [j-1].wbunit=0; } } CPindex=0; } /*InitCPinfo*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void BookWriteBackSlot(int index,int unit,int gdef,int fdef,int ddef,int stall) { /********************************************************************************/ /** Enter a writeback-slot booking into CPinfo entry 'index': if it is already **/ /** occupied then arbitrate the rival claims and shuffle entries down as **/ /** required. **/ /** Note that we do not model execution unit unavailability due to stalls **/ /** imposed while waiting for a writeback-slot. 
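**/
/* The shuffling can be pictured on a one-slot-per-clock machine (this sketch */
/* is guarded out of the build; the X names, and in particular the XMoreUrgent*/
/* arbitration rule, are stand-ins for the real unit and stall comparison):   */
#if(0)
#define XWBMAX 15
struct XWBS{ int unit; int stall; int greg; };
static struct XWBS XSlot [XWBMAX+1];           /* one writeback-slot per clock */
static int XMoreUrgent(int unit,int stall,struct XWBS *incumbent)
{ return (incumbent->unit==0)||(incumbent->stall<stall); }  /* assumed rule    */
static void XBook(int index,int unit,int greg,int stall)
{ struct XWBS *wbs; int u,g,s,start;
  start=index;
  while (1)
  { wbs=&XSlot [index];
    if (XMoreUrgent(unit,stall,wbs))
    { u=wbs->unit; g=wbs->greg; s=wbs->stall;  /* displace the incumbent       */
      wbs->unit=unit; wbs->greg=greg; wbs->stall=stall;
      if (u==0) return ;                       /* claimed an empty slot        */
      unit=u; greg=g; stall=s;                 /* carry the loser forward      */
    }
    index=(index+1)&XWBMAX;                    /* try the next clock           */
    if (index==start) return ;                 /* scoreboard full - give up    */
  }
}
#endif
/** The arbitration used by the translator, which also marks a delayed booking's destination registers busy while it waits, follows.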
**/ /********************************************************************************/ struct CPfmt *cp; struct WBSfmt *wbs; int gd,fd,u,sf,bestslot; #if(ISERFREQUIRED==2) int dd; #endif do { cp=&CPinfo [index]; #if(ISSUPERSCALAR==0) bestslot=1; #else if (cp->wbsuse [1-1].wbunit<=cp->wbsuse [2-1].wbunit) { bestslot=1; } else { bestslot=2; } #endif; wbs=&cp->wbsuse [bestslot-1]; if (wbs->wbunit<unit) { u=wbs->wbunit; sf=wbs->wbstall; gd=wbs->wbgreg; wbs->wbunit=unit; wbs->wbstall=stall; wbs->wbgreg=gdef; #if(ISERFREQUIRED!=0) fd=wbs->wbfreg; wbs->wbfreg=fdef; fdef=fd; #endif; #if(ISERFREQUIRED==2) dd=wbs->wbdreg; wbs->wbdreg=ddef; ddef=dd; #endif if (u==0) break ; /* overwrote an empty entry */ unit=u; stall=sf; gdef=gd; } /* if we have to delay an instruction due to the unavailability of */ /* a writeback-slot then we mark the unit and its destination */ /* registers as busy during the waiting time */ cp->gregbusy=cp->gregbusy|gdef; #if(ISERFREQUIRED!=0) cp->fregbusy=cp->fregbusy|fdef; #endif; #if(ISERFREQUIRED==2) cp->dregbusy=cp->dregbusy|ddef; #endif index=(index+1)&CPmax; /* wrap-around if necessary */ if (index==CPindex) break ; /* we have run out of space in the */ /* scoreboard so abort */ } while (1) /* FOR EVER */; } /*BookWriteBackSlot*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void CreateCPEntry(int blockage,int latency,int stall,int unit,int gdef,int fdef, int ddef,int cdef) { /********************************************************************************/ /** An instruction has been chosen for execution. It is issued on the current **/ /** clock. The completion info array is updated appropriately for the **/ /** execution of the instruction. blockage is the number of cycles that the **/ /** execution unit will be busy for. latency is the number of cycles before **/ /** the instruction is ready to complete and write back its results (if any). **/ /** stall is the stall factor of the instruction. unit is the execution unit **/ /** to which the instruction is issued. gdef and fdef give the registers **/ /** defined by the instruction. **/ /********************************************************************************/ struct CPfmt *cp; int finishclock,i,unitvector; /* mark the destination registers as unavailable until the */ /* instruction completes */ for (i=0; i<=latency-1; i++) { cp=&CPinfo [((CPindex+i)&CPmax)]; cp->gregbusy=cp->gregbusy|gdef; #if(ISERFREQUIRED!=0) cp->fregbusy=cp->fregbusy|fdef; #if(ISERFREQUIRED==2) cp->dregbusy=cp->dregbusy|ddef; #endif #if(ISCRFREQUIRED!=0) cp->cregbusy=cp->cregbusy|cdef; #endif; #endif; } /* mark the execution unit as busy until it is able to */ /* accept new instructions again */ unitvector=1<<unit; for (i=0; i<=blockage-1; i++) { cp=&CPinfo [((CPindex+i)&CPmax)]; cp->unitbusy=cp->unitbusy|unitvector; } /* if the instruction generates any results in the GRF or ERF */ /* registers then we need to book a writeback-slot for it */ if ((gdef|fdef)!=0) { finishclock=(CPindex+latency)&CPmax; BookWriteBackSlot(finishclock,unit,gdef,fdef,ddef,stall); } } /*CreateCPEntry*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void AdvanceCPClock() { /********************************************************************************/ /** Advance the simulation clock by one beat. 
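The entry being left is cleared so that, CPmax+1 beats later, it can be reused as a fresh completion entry for the new clock that then maps onto it.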
**/ /********************************************************************************/ struct CPfmt *cp; int i; /* clear the current entry in the scoreboard */ cp=&CPinfo [CPindex]; cp->unitbusy=0; cp->gregbusy=0; #if(ISERFREQUIRED!=0) cp->fregbusy=0; #if(ISCRFREQUIRED!=0) cp->cregbusy=0; #endif; #endif; for (i=1; i<=NUMWBS; i++) { cp->wbsuse [i-1].wbunit=0; } /* increment current entry index */ CPindex=(CPindex+1)&CPmax; } /*AdvanceCPClock*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static int SetupSchDescriptors(struct fragfmt *frag) { /********************************************************************************/ /** This function creates a chain of schedule descriptors for each real **/ /** machine instruction in the given fragment. The descriptors are placed in **/ /** the schedule descriptor info area. Returns the number of created **/ /** descriptors. **/ /********************************************************************************/ struct instrfmt *curinstr; struct mschedulef *SchedInfo; struct SchDescFmt *CurSchDesc,*PrevSchDesc,*SearchSchDesc,*DepSchDesc; int LastSchDesc,nextinstr,CurSch,gresult,fresult,cresult,dresult,SearchSch,latency,opcode,stallfactor,j,DepDesc ,count,guse,fuse,cuse,duse,junk,greferenced,freferenced,creferenced,dreferenced; /* get the base of the scheduling table */ SchedInfo=(struct mschedulef *)((int)&SchedTable [MINSCHOP-(MINSCHOP)]); /* create a descriptor for each real machine instruction in the frag */ count=0; LastSchDesc=0; nextinstr=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nextinstr!=0) { curinstr=(struct instrfmt*)(nextinstr); CurSch=SchDescInfoBase+(count*SIZEOFSCHDESCFMT); CurSchDesc=(struct SchDescFmt*)(CurSch); /* maintain a double-linked list between the descriptors */ if (LastSchDesc!=0) { PrevSchDesc->next=CurSch; } CurSchDesc->prev=LastSchDesc; CurSchDesc->next=0; /* store default values into the following fields */ CurSchDesc->props=0; CurSchDesc->prevcand=0; CurSchDesc->nextcand=0; CurSchDesc->stallfactor=NOSTALL; /* we calculate register dependencies in the next pass */ CurSchDesc->reguserA=NULL; CurSchDesc->reguserB=NULL; CurSchDesc->regcollision=NULL; /* find the properties of the instruction */ if ((curinstr->props&ANYCALL)!=0) { if ((curinstr->props&DELAYSLOTEXISTS)==0) { CurSchDesc->props=SDGENERICJUMP|SDCALLNDS; } else { CurSchDesc->props=SDGENERICJUMP|SDCALLDS; } } else if ((curinstr->props&(BRANCH|JUMPINSTR))!=0) { if ((curinstr->props&DELAYSLOTEXISTS)==0) { CurSchDesc->props=SDGENERICJUMP|SDJUMPNDS; } else { CurSchDesc->props=SDGENERICJUMP|SDJUMPDS; } }; if (((curinstr->privprops&SCHRESTRICTINSTR)!=0)||((curinstr->props&BARRIERINSTR)!=0)) { CurSchDesc->props|=SDSCHRESTRICT; } if ((curinstr->props&DELAYSLOTEXISTS)!=0) { CurSchDesc->props|=SDHASDS; } /* copy across register reference and definition information */ CurSchDesc->instr=curinstr; CurSchDesc->gref=curinstr->rref; CurSchDesc->gdef=curinstr->rdef; CurSchDesc->fref=0; CurSchDesc->fdef=0; CurSchDesc->cref=0; CurSchDesc->cdef=0; #if(ISERFREQUIRED!=0) CurSchDesc->fref=curinstr->fref; CurSchDesc->fdef=curinstr->fdef; #endif #if(ISCRFREQUIRED!=0) CurSchDesc->cref=curinstr->cref; CurSchDesc->cdef=curinstr->cdef; #endif #if(ISERFREQUIRED==2) CurSchDesc->dref=curinstr->dref; CurSchDesc->ddef=curinstr->ddef; #endif /* copy across the instruction execution characteristics */ opcode=curinstr->opcode; #if(ISDOUBLEOPCODE!=0) opcode=(unsigned)opcode>>1; #endif; CurSchDesc->unit=SchedInfo 
[opcode-(MINSCHOP)].unit; CurSchDesc->blockage=SchedInfo [opcode-(MINSCHOP)].blockage; latency=SchedInfo [opcode-(MINSCHOP)].primlatency; CurSchDesc->latency=latency; /* On PENTIUM some instructions change their pairing */ /* or timing based on operand format. This is beyond */ /* the model so do a few frigs here */ #if(Target==PENTIUM) if (((curinstr->variant==OPMEMLIT)||(curinstr->variant==OPBMEMLIT))&&((curinstr->disp!=0)||((curinstr ->privprops&FIXEDINSTR)!=0))) CurSchDesc->blockage=CurSchDesc->blockage+1; /*wont pair*/ if ((curinstr->variant==OPRRM)&&((curinstr->cdef&(1<latency=CurSchDesc ->latency+1; if ((curinstr->variant==OPRMR)&&((curinstr->cdef&(1<latency=CurSchDesc ->latency+2; if (curinstr->u2.prefixed!=0) /* must go down U pipe */{ CurSchDesc->unit=U1; CurSchDesc->latency=CurSchDesc->latency+1; }; #endif; if ((latency<=1)&&((curinstr->props&(LOADINSTR|STOREINSTR))==0)) { CurSchDesc->props|=SDSIMPLEINSTR; } if (latency>1) { curinstr->privprops|=SLOWRESULTINSTR; } else { curinstr->privprops&=~SLOWRESULTINSTR; } /* ascertain any memory access dependency */ if ((curinstr->props&(LOADINSTR|STOREINSTR))!=0) { GetMemoryDependency(frag,CurSch,SchDescInfoBase,&DepDesc,&junk); if (DepDesc!=0) { CurSchDesc->props|=SDHASMEMDEP; DepSchDesc=(struct SchDescFmt*)(DepDesc); DepSchDesc->props|=SDDEPENDEDON; } } /* get the next real instruction in the fragment */ nextinstr=(int)BMachineInstr(curinstr->nextinstr,NEXTCHAIN); LastSchDesc=CurSch; PrevSchDesc=CurSchDesc; count++; } /* we now go through the descriptors we have just created and find, */ /* for each of them, the first succeeding descriptor which uses their */ /* register results */ if (count!=0) { CurSch=SchDescInfoBase; while (CurSch!=0) { CurSchDesc=(struct SchDescFmt*)(CurSch); CurSch=CurSchDesc->next; /* get the results generated by the current descriptor */ gresult=CurSchDesc->gdef; fresult=0; cresult=0; dresult=0; #if(ISERFREQUIRED!=0) fresult=CurSchDesc->fdef; #endif #if(ISCRFREQUIRED!=0) cresult=CurSchDesc->cdef; #endif #if(ISERFREQUIRED==2) dresult=CurSchDesc->ddef; #endif /* get the registers referenced by the current descriptor */ greferenced=CurSchDesc->gref; freferenced=0; creferenced=0; dreferenced=0; #if(ISERFREQUIRED!=0) freferenced=CurSchDesc->fref; #endif #if(ISCRFREQUIRED!=0) creferenced=CurSchDesc->cref; #endif #if(ISERFREQUIRED==2) dreferenced=CurSchDesc->dref; #endif /* look for subsequent register dependence and anti-dependence */ SearchSch=CurSch; while (SearchSch!=0) { SearchSchDesc=(struct SchDescFmt*)(SearchSch); if (((gresult&SearchSchDesc->gref)!=0)||((fresult&SearchSchDesc->fref)!=0)||((cresult&SearchSchDesc ->cref)!=0)) { /* found an instruction which uses descriptor's result */ if (CurSchDesc->reguserB==NULL) { if (CurSchDesc->reguserA==NULL) { CurSchDesc->reguserA=SearchSchDesc; } else { CurSchDesc->reguserB=SearchSchDesc; } } /* if the register user is only dependent on alternatively */ /* delayed registers then use the alternative latency from */ /* scheduling table */ guse=gresult&SearchSchDesc->gref; fuse=fresult&SearchSchDesc->fref; cuse=cresult&SearchSchDesc->cref; duse=dresult&SearchSchDesc->dref; if (((guse&ALTDELAYEDGRF)!=0)||((fuse&ALTDELAYEDERF)!=0)|| ((cuse&ALTDELAYEDCRF)!=0)||((duse&ALTDELAYEDDRF)!=0)) { curinstr=CurSchDesc->instr; opcode=curinstr->opcode; #if(ISDOUBLEOPCODE!=0) opcode=(unsigned)opcode>>1; #endif; /* if the register user uses both ordinary and */ /* alternatively delayed registers then the */ /* bigger of the two latencies is used */ if 
(((guse&(~ALTDELAYEDGRF))!=0)||((fuse&(~ALTDELAYEDERF))!=0)|| ((cuse&(~ALTDELAYEDCRF))!=0)||((duse&(~ALTDELAYEDDRF))!=0)) { if (SchedInfo [opcode-(MINSCHOP)].seclatency>CurSchDesc->latency) { CurSchDesc->latency=SchedInfo [opcode-(MINSCHOP)].seclatency; } } else { CurSchDesc->latency=SchedInfo [opcode-(MINSCHOP)].seclatency; } } } /* register dependencies are not considered live across */ /* a call */ if ((SearchSchDesc->props&(SDCALLDS|SDCALLNDS))!=0) break ; if (((gresult&SearchSchDesc->gdef)!=0)|| ((fresult&SearchSchDesc->fdef)!=0)|| ((cresult&SearchSchDesc->cdef)!=0)|| ((dresult&SearchSchDesc->ddef)!=0)) { /* some or all of the results are overwritten so delete */ /* them from the search */ gresult&=~SearchSchDesc->gdef; fresult&=~SearchSchDesc->fdef; cresult&=~SearchSchDesc->cdef; dresult&=~SearchSchDesc->ddef; } /* look for an instruction which overwrites some of the */ /* referenced registers */ if (((greferenced&SearchSchDesc->gdef)!=0)|| ((freferenced&SearchSchDesc->fdef)!=0)|| ((creferenced&SearchSchDesc->cdef)!=0)|| ((dreferenced&SearchSchDesc->ddef)!=0)) { if (CurSchDesc->regcollision==NULL) { CurSchDesc->regcollision=SearchSchDesc; } } /* look for an instruction which references the same */ /* registers again */ if (((greferenced&SearchSchDesc->gref)!=0)|| ((freferenced&SearchSchDesc->fref)!=0)|| ((creferenced&SearchSchDesc->cref)!=0)|| ((dreferenced&SearchSchDesc->dref)!=0)) { greferenced&=~SearchSchDesc->gref; freferenced&=~SearchSchDesc->fref; creferenced&=~SearchSchDesc->cref; dreferenced&=~SearchSchDesc->dref; } /* exit as soon as all useful information has been extracted */ if (((CurSchDesc->reguserB!=NULL)|| (((gresult|fresult)|cresult|dresult)==0))&&((CurSchDesc->regcollision!=NULL)|| ((greferenced|freferenced|creferenced|dreferenced)==0))) { break ; } SearchSch=SearchSchDesc->next; } } /* now calculate the stall factor as being the latencies of all the */ /* instructions in the dependency chain - if an intruction has no */ /* dependees but the reg is reused then this chain is used instead */ for (j=count-1; j>=0; j--) { CurSchDesc=(struct SchDescFmt*)(SchDescInfoBase+(j*SIZEOFSCHDESCFMT)); stallfactor=0; if (CurSchDesc->reguserA!=NULL) { DepSchDesc=(struct SchDescFmt*)(CurSchDesc->reguserA); if (DepSchDesc->stallfactor>stallfactor) { stallfactor=DepSchDesc->stallfactor; } } if (CurSchDesc->reguserB!=NULL) { DepSchDesc=(struct SchDescFmt*)(CurSchDesc->reguserB); if (DepSchDesc->stallfactor>stallfactor) { stallfactor=DepSchDesc->stallfactor; } } if (CurSchDesc->regcollision!=NULL) { DepSchDesc=(struct SchDescFmt*)(CurSchDesc->regcollision); if (DepSchDesc->stallfactor>stallfactor) { stallfactor=DepSchDesc->stallfactor; } } if ((CurSchDesc->props&SDDEPENDEDON)!=0) { stallfactor++; } if (stallfactor==0 && (CurSchDesc->props&SDSIMPLEINSTR)!=0 && CurSchDesc->reguserA==NULL && CurSchDesc->reguserB==NULL && CurSchDesc->regcollision==NULL) { stallfactor=NOSTALL; } CurSchDesc->stallfactor=stallfactor+CurSchDesc->latency; } } return count; } /*SetupSchDescriptors*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void ClaimSchDescriptors(int procfrag,int singlefrag) { /********************************************************************************/ /** Claim sufficient space for the schedule descriptor information area. **/ /** Procfrag is a pointer to the first fragment which is to be scheduled. If **/ /** singlefrag is non-zero then only a single fragment is to be scheduled. 
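**/
/* Since the scheduler only ever holds descriptors for one fragment at a time,*/
/* the pool is sized by the largest fragment. A sketch of the sizing rule     */
/* (the X names are invented for the example):                                */
#if(0)
static int XPoolBytes(int *instrcounts,int nfrags,int descbytes)
{ int i,max;
  max=0;
  for (i=0; i<nfrags; i++) if (instrcounts [i]>max) max=instrcounts [i];
  return max*descbytes;                /* one descriptor per machine instruction */
}
#endif
/** The routine below obtains the maximum by walking the real fragment chain with BMachineInstr before calling BMalloc.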
**/ /********************************************************************************/ struct fragfmt *frag; struct instrfmt *curinstr; int count,maxcount,nextinstr; /* find the maximum amount of space required for a single fragment */ maxcount=0; while (procfrag!=0) { frag=(struct fragfmt*)(procfrag); count=0; if (((frag->props&((USERASSEMFRAG|NOOPTIMFRAG)|DSWITCHJUMPFRAG))==0)&& (BMachineInstr(frag->firstinstr,NEXTCHAIN)!=NULL)) { /* count the number of real machine instructions in the fragment */ nextinstr=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nextinstr!=0) { count++; curinstr=(struct instrfmt*)(nextinstr); nextinstr=(int)BMachineInstr(curinstr->nextinstr,NEXTCHAIN); } } if (count>maxcount) maxcount=count; if (singlefrag!=0) break ; procfrag=(int)frag->nextfrag; } /* grab enough space to hold a descriptor for each real machine instr */ if (maxcount>0) { SchDescInfoBase=BMalloc(maxcount*SIZEOFSCHDESCFMT,1,MASCHDESCRIPTORS); if (SchDescInfoBase==0) { BError("No space for basic block scheduler descriptors"); } } else { SchDescInfoBase=0; } } /*ClaimSchDescriptors*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void FreeSchDescriptors() { /********************************************************************************/ /** Frees the memory occupied by the schedule descriptor info area. **/ /********************************************************************************/ if (SchDescInfoBase!=0) BFree(SchDescInfoBase); } /*FreeSchDescriptors*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static int GetCandidateGroup(struct fragfmt *frag,int StartDesc) { /********************************************************************************/ /** Selects a group of instructions which may be issued on the next clock **/ /** cycle. A stall factor is calculated for each candidate. This is a **/ /** heuristic metric of the pipeline stall expected if the instruction is **/ /** issued in its current position in the instruction stream. The larger the **/ /** stall factor, the greater the likelihood that the instruction will cause a **/ /** stall. Thus we issue instructions with the largest stall factors first **/ /** with a view to minimising pipeline stalls. **/ /** If the given descriptor corresponds to a jump of any sort then only that **/ /** instruction, along with any associated delay slot instruction, are **/ /** returned as candidates. 
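**/
/* A cut-down form of the issue test used below (invented X names; only the   */
/* integer register file is modelled, and the writeback-slot and memory       */
/* dependency checks are left out):                                           */
#if(0)
/* non-zero if an instruction using execution unit bit 'unitbit', reading     */
/* registers 'gref' and writing registers 'gdef' may issue now, given the     */
/* busy units and registers and the sources still needed by earlier unissued  */
/* instructions                                                               */
static int XMayIssue(int busyunits,int unitbit,int busyregs,int pendingsources,
                     int gref,int gdef)
{ if ((busyunits&unitbit)!=0) return 0;        /* execution unit not free      */
  if ((busyregs&(gref|gdef))!=0) return 0;     /* sources or results still busy*/
  if ((pendingsources&gdef)!=0) return 0;      /* would overwrite a live source*/
  return 1;
}
#endif
/** The full test below also covers the float and control register files, reservation-station units, memory dependencies and writeback-slot contention.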
**/ /********************************************************************************/ struct SchDescFmt *LastSchDesc,*CurDesc; struct WBSfmt *wbs; struct CPfmt *cp; int busyu,gregbusy,fregbusy,cregbusy,dregbusy, gsources,fsources,csources,dsources, candidates,inspected,NextDesc,HasMemDep ,ForceCount,stallfact,IsCandDesc,wbsnum,MemDep,DefOverlap; /* get the current scoreboard busy units and registers */ busyu=CPinfo [CPindex].unitbusy; gregbusy=CPinfo [CPindex].gregbusy; fregbusy=0; cregbusy=0; dregbusy=0; #if(ISERFREQUIRED!=0) fregbusy=CPinfo [CPindex].fregbusy; #if(ISCRFREQUIRED!=0) cregbusy=CPinfo [CPindex].cregbusy; #endif #if(ISERFREQUIRED==2) dregbusy=CPinfo [CPindex].dregbusy; #endif #endif; /* don't take any notice of exempt registers during scheduling */ gregbusy&=~GRFEXEMPT; fregbusy&=~ERFEXEMPT; cregbusy&=~CRFEXEMPT; dregbusy&=~DRFEXEMPT; /* initialise register sources and candidate list */ gsources=0; fsources=0; csources=0; dsources=0; candidates=0; inspected=0; /* if the first instruction is a jump or restricted instruction */ /* of any sort then it has to be a member of the candidate set, */ /* along with any associated delay slot instruction */ CurDesc=(struct SchDescFmt*)(StartDesc); if ((CurDesc->props&SDGENERICJUMP)!=0) { if ((CurDesc->props&(SDCALLDS|SDJUMPDS))!=0) { ForceCount=2; } else { ForceCount=1; } } else if ((CurDesc->props&SDHASDS)!=0) { ForceCount=2; } else if ((CurDesc->props&SDSCHRESTRICT)!=0) { ForceCount=1; } else { ForceCount=0; }; /* go through the descriptor chain looking for suitable candidates to */ /* issue on the next clock */ NextDesc=StartDesc; while ((NextDesc!=0)&&(inspected<=MAXINSPECTIONS)) { CurDesc=(struct SchDescFmt*)(NextDesc); if (ForceCount==0) inspected++; IsCandDesc=0; /* don't move instructions around a forced instruction of any sort */ if ((ForceCount==0)&&((CurDesc->props&((SDGENERICJUMP|SDSCHRESTRICT)|SDHASDS))!=0)) { break ; } /* check to see if the candidate has any memory dependencies */ HasMemDep=0; if (((CurDesc->props&SDHASMEMDEP)!=0)&&((CurDesc->props&SDMDINACTIVE)==0)) { GetMemoryDependency(frag,NextDesc,StartDesc,&MemDep,&DefOverlap); if (MemDep==0) { CurDesc->props|=SDMDINACTIVE; } else { HasMemDep=1; } } /* For an instruction to be issueable the following is required: */ /* - the registers defined by the instruction don't overwrite */ /* the current sources */ /* - the live registers defined by the instruction don't */ /* overwrite the current busy registers */ /* - the instruction sources are available now or the unit is to */ /* be executed by a reservation station */ /* - the instruction's execution unit is available now */ /* - the instruction has no active memory dependencies */ if (((gsources&CurDesc->gdef)==0)&& ((fsources&CurDesc->fdef)==0)&& ((csources&CurDesc->cdef)==0)&& ((dsources&CurDesc->ddef)==0)&& ((gregbusy&CurDesc->gdef)==0)&& ((fregbusy&CurDesc->fdef)==0)&& ((cregbusy&CurDesc->cdef)==0)&& ((dregbusy&CurDesc->ddef)==0)&& ((((gregbusy&CurDesc->gref)==0)&& ((fregbusy&CurDesc->fref)==0)&& ((cregbusy&CurDesc->cref)==0)&& ((dregbusy&CurDesc->dref)==0))||(IsExecRS [CurDesc->unit]!=0))&& ((busyu&(1<<CurDesc->unit))==0)&& (HasMemDep==0)) { /* don't allow the issue of any instructions that will cause a */ /* writeback-slot delay for results that are more urgent */ stallfact=CurDesc->stallfactor; if ((ForceCount!=0)||((CurDesc->gdef|CurDesc->fdef|CurDesc->ddef)==0)) { IsCandDesc=1; } else { cp=&CPinfo [((CPindex+CurDesc->latency)&CPmax)]; for (wbsnum=1; wbsnum<=NUMWBS; wbsnum++) { wbs=&cp->wbsuse [wbsnum-1]; if 
((wbs->wbunit==0)||(wbs->wbunit>=CurDesc->unit)||(wbs->wbstall<=stallfact)) IsCandDesc= 1; } } if (IsCandDesc!=0) { /* add the descriptor to the list of candidates */ CurDesc->nextcand=0; if (candidates!=0) { CurDesc->prevcand=(int)LastSchDesc; LastSchDesc->nextcand=NextDesc; } else { CurDesc->prevcand=0; candidates=NextDesc; } LastSchDesc=CurDesc; /* return any forced instruction and possible associated delay */ /* slot instruction if they are both eligible for issue now */ if (ForceCount!=0) { ForceCount--; if (ForceCount==0) return candidates; } } } /* if we are issueing a forced instruction of any kind then both */ /* it and any associated delay slot instruction must be */ /* available for issue on this clock cycle */ if ((IsCandDesc==0)&&(ForceCount!=0)) return 0; /* update running source and busy registers */ if (ForceCount==0) { gsources=(gsources|CurDesc->gref)&(~GRFEXEMPT); gregbusy=(gregbusy|CurDesc->gdef)&(~GRFEXEMPT); #if(ISERFREQUIRED!=0) fsources=(fsources|CurDesc->fref)&(~ERFEXEMPT); fregbusy=(fregbusy|CurDesc->fdef)&(~ERFEXEMPT); #if(ISCRFREQUIRED!=0) csources=(csources|CurDesc->cref)&(~CRFEXEMPT); cregbusy=(cregbusy|CurDesc->cdef)&(~CRFEXEMPT); #endif; #if(ISERFREQUIRED==2) dsources=(dsources|CurDesc->dref)&(~DRFEXEMPT); dregbusy=(dregbusy|CurDesc->ddef)&(~DRFEXEMPT); #endif #endif; } NextDesc=CurDesc->next; } /* return the list of candidates for issue on the next clock */ return candidates; } /*GetCandidateGroup*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static int SelectInstructionGroup(struct fragfmt *frag,int StartDesc) { /********************************************************************************/ /** We select one or two instructions that may be issued on the same clock. **/ /** Obviously we aim to pick a pair of instructions to gain maximum throughput **/ /** from the pipeline. We favour instructions with large individual stall **/ /** factors. **/ /********************************************************************************/ struct SchDescFmt *CurDesc,*FirstDesc,*SecondDesc; int candidates,MaxStallDesc,First,Second,MaxStall,FirstUnit,SwapTemp,NextDesc,Clash1,Clash2,SecondUnit; /* obtain a list of all the instructions which may be issued on the */ /* next clock */ candidates=GetCandidateGroup(frag,StartDesc); /* print debugging information */ if ((TraceEnab!=0)&&(VerSchReport!=0)) { printf("\nCANDIDATE GROUP:\n"); PrintCandidateList(candidates); printf("\n"); } if (candidates!=0) { /* Try every combination of instruction pairs from the instruction */ /* list to try and find two instructions that may be issued together. */ /* We favour pairs with high stall factors. */ MaxStallDesc=0; while (candidates!=0) { /* Find the instruction with the highest stall factor from the */ /* candidate list. This will be used as a provisional first */ /* member of an instruction pair. */ CurDesc=(struct SchDescFmt*)(candidates); if ((CurDesc->props&SDGENERICJUMP)!=0) { First=candidates; } else { MaxStall=NOSTALL-1; NextDesc=candidates; while (NextDesc!=0) { CurDesc=(struct SchDescFmt*)(NextDesc); if (CurDesc->stallfactor>MaxStall) { MaxStall=CurDesc->stallfactor; First=NextDesc; } NextDesc=CurDesc->nextcand; } } /* Remember the instruction with the largest stall factor from */ /* the original candidate list. We issue this if we are unable */ /* to find a suitable pair. 
/***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static int SelectInstructionGroup(struct fragfmt *frag,int StartDesc) { /********************************************************************************/ /** We select one or two instructions that may be issued on the same clock. **/ /** Obviously we aim to pick a pair of instructions to gain maximum throughput **/ /** from the pipeline. We favour instructions with large individual stall **/ /** factors. **/ /********************************************************************************/ struct SchDescFmt *CurDesc,*FirstDesc,*SecondDesc; int candidates,MaxStallDesc,First,Second,MaxStall,FirstUnit,SwapTemp,NextDesc,Clash1,Clash2,SecondUnit; /* obtain a list of all the instructions which may be issued on the */ /* next clock */ candidates=GetCandidateGroup(frag,StartDesc); /* print debugging information */ if ((TraceEnab!=0)&&(VerSchReport!=0)) { printf("\nCANDIDATE GROUP:\n"); PrintCandidateList(candidates); printf("\n"); } if (candidates!=0) { /* Try every combination of instruction pairs from the instruction */ /* list to find two instructions that may be issued together. */ /* We favour pairs with high stall factors. */ MaxStallDesc=0; while (candidates!=0) { /* Find the instruction with the highest stall factor from the */ /* candidate list. This will be used as a provisional first */ /* member of an instruction pair. */ CurDesc=(struct SchDescFmt*)(candidates); if ((CurDesc->props&SDGENERICJUMP)!=0) { First=candidates; } else { MaxStall=NOSTALL-1; NextDesc=candidates; while (NextDesc!=0) { CurDesc=(struct SchDescFmt*)(NextDesc); if (CurDesc->stallfactor>MaxStall) { MaxStall=CurDesc->stallfactor; First=NextDesc; } NextDesc=CurDesc->nextcand; } } /* Remember the instruction with the largest stall factor from */ /* the original candidate list. We issue this if we are unable */ /* to find a suitable pair.
*/ if (MaxStallDesc==0) MaxStallDesc=First; /* remove the first instruction from the candidate list */ FirstDesc=(struct SchDescFmt*)(First); if (FirstDesc->prevcand!=0) { CurDesc=(struct SchDescFmt*)(FirstDesc->prevcand); CurDesc->nextcand=FirstDesc->nextcand; } else { candidates=FirstDesc->nextcand; } if (FirstDesc->nextcand!=0) { CurDesc=(struct SchDescFmt*)(FirstDesc->nextcand); CurDesc->prevcand=FirstDesc->prevcand; } /* on a superscalar machine we can issue more than one */ /* instruction per clock */ /* However on Pentium due to interlocking of the U and V pipes */ /* both instructions must have a blockage of 0 for */ /* dual issue even though they go to different units */ if (((ISSUPERSCALAR!=0)||((FirstDesc->props&SDGENERICJUMP)!=0))&&((Target!=PENTIUM)||(FirstDesc->blockage ==0))) { /* Get the execution unit used by the first instruction. If the */ /* execution unit can accept more than one instruction per clock */ /* then there is no constraint on which unit may be used by the */ /* second instruction of the pair. */ if (((FirstDesc->props&SDGENERICJUMP)!=0)||((NumUnits [FirstDesc->unit]>1)&&(FirstDesc->blockage ==0))) { FirstUnit=-1; } else { FirstUnit=FirstDesc->unit; } /* On PENTIUM the first issued will block the U pipe */ /* so the second MUST be able to go down the V pipe */ #if(Target==PENTIUM) FirstUnit=U1; #endif; /* Find a suitable partner for the first instruction from the */ /* remainder of the candidate list. We choose an instruction */ /* with as high a stall factor as possible */ Second=0; MaxStall=NOSTALL-1; NextDesc=candidates; while (NextDesc!=0) { CurDesc=(struct SchDescFmt*)(NextDesc); if ((CurDesc->unit!=FirstUnit)&&(CurDesc->stallfactor>MaxStall)&&((Target!=PENTIUM)||(CurDesc->blockage ==0))) { MaxStall=CurDesc->stallfactor; Second=NextDesc; } NextDesc=CurDesc->nextcand; } if (Second!=0) { /* We can issue a pair of instructions. We may also swap the */ /* order of the pair in order to reduce the chances of a */ /* stall if the instruction pairing turns out to be different */ /* from that simulated. We try to make all adjacent */ /* instructions issuable simultaneously. */ SecondDesc=(struct SchDescFmt*)(Second); if ((NumUnits [SecondDesc->unit]>1)&&(SecondDesc->blockage==0)) { SecondUnit=-1; } else { SecondUnit=SecondDesc->unit; } CurDesc=(struct SchDescFmt*)(StartDesc); if (((FirstDesc->props&SDGENERICJUMP)==0)&&(CurDesc->prev!=0)) { CurDesc=(struct SchDescFmt*)(CurDesc->prev); /* see if first member of pair is dependent on the */ /* results of the previously issued instruction */ if (((CurDesc->gdef&(FirstDesc->gref|FirstDesc->gdef))!=0)||((CurDesc->fdef& (FirstDesc->fref|FirstDesc ->fdef))!=0)||((CurDesc->cdef&(FirstDesc->cref|FirstDesc->cdef))!=0)) { Clash1=1; } else { Clash1=0; } /* see if second member of pair is dependent on the */ /* results of the previously issued instruction */ if (((CurDesc->gdef&(SecondDesc->gref|SecondDesc->gdef))!=0)||((CurDesc->fdef& (SecondDesc->fref|SecondDesc ->fdef))!=0)||((CurDesc->cdef&(SecondDesc->cref|SecondDesc->cdef))!=0)) { Clash2=1; } else { Clash2=0; } /* swap the instruction pair if that will lessen the */ /* chances of a stall if the pairing of the simulated */ /* execution turns out to be different from that in */ /* a real execution */ /* On PENTIUM we can only swap if the first instr can */ /* go down the V pipe.
It is also desirable that one-byte*/ /* instructions are issued as the first of the pair */ /* since this gives dual issue when out of I-cache */ #if(Target==PENTIUM) if ((FirstDesc->unit==U0)&&(FirstDesc->instr->csize>1)) { if ((SecondDesc->instr->csize==1)||((FirstUnit==CurDesc->unit)&&(Clash2==0))||((SecondUnit!=CurDesc ->unit)&&(Clash1!=0))) { SwapTemp=First; First=Second; Second=SwapTemp; } } /*Target # PENTIUM */ #else if (((FirstUnit==CurDesc->unit)&&(Clash2==0))||((SecondUnit!=CurDesc->unit)&&(Clash1!=0))) { SwapTemp=First; First=Second; Second=SwapTemp; } #endif; } FirstDesc=(struct SchDescFmt*)(First); FirstDesc->instr->issueprops=FirstDesc->instr->issueprops|TPIGSTART; SecondDesc=(struct SchDescFmt*)(Second); FirstDesc->prevcand=0; FirstDesc->nextcand=Second; SecondDesc->prevcand=First; SecondDesc->nextcand=0; return First; } } } /* there is no suitable instruction pairing in the instruction group */ /* so we just issue the instruction with the highest stall factor */ /* from the original candidate group */ FirstDesc=(struct SchDescFmt*)(MaxStallDesc); #if(ISSUPERSCALAR!=0) FirstDesc->instr->issueprops=FirstDesc->instr->issueprops|TPIGSTART; #endif; FirstDesc->prevcand=0; FirstDesc->nextcand=0; return MaxStallDesc; } else { /* no instructions may be issued on the current cycle */ return 0; } } /*SelectInstructionGroup*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void MoveDescBefore(int fragaddr,int which,int where) { /********************************************************************************/ /** Which and where are descriptors in the fragment fragaddr. This routine **/ /** alters the linkage of the scheduling descriptors and their corresponding **/ /** instructions so that 'which' becomes the previous descriptor to 'where'. **/ /********************************************************************************/ struct fragfmt *frag; struct SchDescFmt *work,*whichdesc,*wheredesc; frag=(struct fragfmt *)fragaddr; /* get the which and where scheduling descriptors */ whichdesc=(struct SchDescFmt*)(which); wheredesc=(struct SchDescFmt*)(where); /* first unhook 'which' from its current position */ if (whichdesc->prev!=0) { work=(struct SchDescFmt*)(whichdesc->prev); work->next=whichdesc->next; } if (whichdesc->next!=0) { work=(struct SchDescFmt*)(whichdesc->next); work->prev=whichdesc->prev; } /* insert 'which' before 'where' in the descriptor chain */ whichdesc->prev=wheredesc->prev; if (wheredesc->prev!=0) { work=(struct SchDescFmt*)(wheredesc->prev); work->next=which; } wheredesc->prev=which; whichdesc->next=where; /* make an equivalent reordering change to the actual instructions */ /* corresponding to 'which' and 'where' */ BMoveBefore(frag,frag,whichdesc->instr,wheredesc->instr); } /*MoveDescBefore*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void MoveDescAfter(int fragaddr,int which,int where) { /********************************************************************************/ /** Which and where are descriptors in the fragment fragaddr. This routine **/ /** alters the linkage of the scheduling descriptors and their corresponding **/ /** instructions so that 'which' becomes the next descriptor after 'where'.
**/ /********************************************************************************/ struct fragfmt *frag; struct SchDescFmt *work,*whichdesc,*wheredesc; frag=(struct fragfmt *)fragaddr; /* get the which and where scheduling descriptors */ whichdesc=(struct SchDescFmt*)(which); wheredesc=(struct SchDescFmt*)(where); /* first unhook 'which' from its current position */ if (whichdesc->prev!=0) { work=(struct SchDescFmt*)(whichdesc->prev); work->next=whichdesc->next; } if (whichdesc->next!=0) { work=(struct SchDescFmt*)(whichdesc->next); work->prev=whichdesc->prev; } /* insert 'which' after 'where' in the descriptor chain */ whichdesc->next=wheredesc->next; if (wheredesc->next!=0) { work=(struct SchDescFmt*)(wheredesc->next); work->prev=which; } wheredesc->next=which; whichdesc->prev=where; /* make an equivalent reordering change to the actual instructions */ /* corresponding to 'which' and 'where' */ BMoveAfter(frag,frag,whichdesc->instr,wheredesc->instr); } /*MoveDescAfter*/ /***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void DoReschedFrag(int fragaddr) { /********************************************************************************/ /** Schedule the given fragment in an attempt to improve the utilisation of **/ /** the processor's resources and reduce pipeline stalls and data hazards. **/ /********************************************************************************/ struct SchDescFmt *CurSchDesc,*FirstIssue,*NewSchDesc; struct fragfmt *frag; struct instrfmt *issueinstr; static int LastIssueIndex; struct CPfmt *cp; int CurSch,IssueGroup,MoveAfter,change,DescriptCount,AccessPenalty, oldunitbusy,oldgregbusy,oldfregbusy,oldcregbusy; #if(ISERFREQUIRED==2) int olddregbusy; #endif /* generate a list of descriptors for all the real machine */ /* instructions in the fragment */ frag=(struct fragfmt*)(fragaddr); DescriptCount=SetupSchDescriptors(frag); if (DescriptCount!=0) { /* clear the pipeline status */ InitCPinfo(); LastIssueIndex=0; /* print debugging information */ if ((TraceEnab!=0)&&(SchReport!=0)) { printf("\n%s%s\nFRAGMENT DESCRIPTOR CHAIN:\n",bannerline,bannerline); PrintDescriptorList(SchDescInfoBase); printf("\n"); } /* Go through the descriptor list scheduling the instructions */ /* as we go. The current descriptor is always the first one */ /* which is yet to be rescheduled. 
*/ CurSch=SchDescInfoBase; while (CurSch!=0) { CurSchDesc=(struct SchDescFmt*)(CurSch); /* select a group of instructions that may be issued next */ IssueGroup=SelectInstructionGroup(frag,CurSch); /* print debugging information */ if ((TraceEnab!=0)&&(SchReport!=0)) { printf("\nISSUE GROUP:\n"); PrintCandidateList(IssueGroup); printf("\n"); } if (IssueGroup!=0) { FirstIssue=(struct SchDescFmt*)(IssueGroup); FirstIssue->instr->bubbles=(CPindex-LastIssueIndex)&CPmax; LastIssueIndex=CPindex; MoveAfter=0; AccessPenalty=0; while (IssueGroup!=0) { NewSchDesc=(struct SchDescFmt*)(IssueGroup); /* update the scoreboard to reflect the issue of the instr */ CreateCPEntry(NewSchDesc->blockage,NewSchDesc->latency, NewSchDesc->stallfactor,NewSchDesc->unit, NewSchDesc->gdef,NewSchDesc->fdef, NewSchDesc->ddef,NewSchDesc->cdef); /* see if there is any access penalty for load or store */ /* instructions on a non-Harvard architecture */ issueinstr=NewSchDesc->instr; if (((issueinstr->props&STOREINSTR)!=0)&&(STOREACCESSPENALTY!=0)) { AccessPenalty+=STOREACCESSPENALTY; if (issueinstr->datasize>4) { AccessPenalty++; } } else if (((issueinstr->props&LOADINSTR)!=0)&&(LOADACCESSPENALTY!=0)) { AccessPenalty+=LOADACCESSPENALTY; if (issueinstr->datasize>4) { AccessPenalty++; } }; /* rechain the descriptor list to reflect the new order */ if (IssueGroup==CurSch) { MoveAfter=1; } else if (MoveAfter==0) { if (CurSchDesc->prev!=IssueGroup) { MoveDescBefore(fragaddr,IssueGroup,CurSch); } } else { if (CurSchDesc->next!=IssueGroup) { MoveDescAfter(fragaddr,IssueGroup,CurSch); } CurSch=IssueGroup; CurSchDesc=(struct SchDescFmt*)(CurSch); }; IssueGroup=NewSchDesc->nextcand; } /* print debugging information */ if ((TraceEnab!=0)&&(SchReport!=0)) { printf("\nCURRENT SCOREBOARD STATE:\n"); PrintScoreboard(); printf("\n"); } /* advance the simulator clock */ AdvanceCPClock(); while (AccessPenalty>0) { AdvanceCPClock(); AccessPenalty--; } if ((FirstIssue->props&SDGENERICJUMP)!=0) { /* we reset the scoreboard upon encountering a call */ if (((FirstIssue->props&SDCALLDS)!=0)||((FirstIssue->props&SDCALLNDS)!=0)) { InitCPinfo(); LastIssueIndex=0; } } /* if the current descriptor was included in the issue */ /* group then advance the pointer to the next descriptor */ if (MoveAfter!=0) CurSch=CurSchDesc->next; } else { /* if we cannot issue any instructions now then advance */ /* the simulator clock until there is some change of */ /* state in the scoreboard */ cp=&CPinfo [CPindex]; oldunitbusy=cp->unitbusy; oldgregbusy=cp->gregbusy; #if(ISERFREQUIRED!=0) oldfregbusy=cp->fregbusy; #if(ISCRFREQUIRED!=0) oldcregbusy=cp->cregbusy; #endif; #if(ISERFREQUIRED==2) olddregbusy=cp->dregbusy; #endif #endif; /* advance the simulator clock until there is some change */ /* in the state of the processor */ do { AdvanceCPClock(); change=0; /* On Pentium the pipes stay in step so both units */ /* must be free before starting another instr pair */ #if(Target==PENTIUM) if (((CPinfo [CPindex].unitbusy==0)&&(oldunitbusy!=0))||(CPinfo [CPindex].gregbusy!=oldgregbusy)) change=1; #else if ((CPinfo [CPindex].unitbusy!=oldunitbusy)||(CPinfo [CPindex].gregbusy!=oldgregbusy)) { change=1; } #endif; #if(ISERFREQUIRED!=0) if (CPinfo [CPindex].fregbusy!=oldfregbusy) { change=1; } #if(ISCRFREQUIRED!=0) if (CPinfo [CPindex].cregbusy!=oldcregbusy) { change=1; }; #endif; #if(ISERFREQUIRED==2) if (CPinfo [CPindex].dregbusy!=olddregbusy) { change=1; }; #endif #endif; } while (!(change!=0)) ; } } } } /*DoReschedFrag*/
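/*------------------------------------------------------------------------------*/
/* Illustrative sketch (not part of the original module): when nothing can be    */
/* issued, DoReschedFrag above simply advances the simulated clock until the     */
/* scoreboard state changes and then lets the scheduler try again.  The          */
/* self-contained example below shows that idle-until-change loop using          */
/* hypothetical names (EgScoreboard, EgTick, EgStallUntilChange) and a crude     */
/* per-unit countdown in place of the CPinfo pipeline table used here.           */
/*------------------------------------------------------------------------------*/
#define EGNUMUNITS 4
struct EgScoreboard {
  int busyfor[EGNUMUNITS];   /* remaining cycles before each execution unit is free */
};
/* one simulated clock tick: every occupied unit gets one cycle closer to free */
static void EgTick(struct EgScoreboard *sb)
{
  int u;
  for (u=0; u<EGNUMUNITS; u++) { if (sb->busyfor[u]>0) sb->busyfor[u]--; }
}
/* busy mask derived from the counters, comparable to the unitbusy field above */
static unsigned EgUnitBusy(const struct EgScoreboard *sb)
{
  unsigned mask=0;
  int u;
  for (u=0; u<EGNUMUNITS; u++) { if (sb->busyfor[u]>0) mask|=(1u<<u); }
  return mask;
}
/* nothing can issue now: advance the clock until at least one unit drains, */
/* then return the number of stall cycles that the simulation charged       */
static int EgStallUntilChange(struct EgScoreboard *sb)
{
  unsigned old=EgUnitBusy(sb);
  int cycles=0;
  if (old==0) return 0;                 /* nothing is busy, so no need to wait */
  do { EgTick(sb); cycles++; } while (EgUnitBusy(sb)==old);
  return cycles;
}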
/***/ /**/ /* Copyright (c) 1994 Edinburgh Portable Compilers Ltd. All Rights Reserved.*/ /**/ /***/ static void SchedProc(int prelimsch) { /********************************************************************************/ /** SchedProc inspects the instructions in the current procedure and attempts **/ /** to reorder them in a way which reduces pipeline stalls. Prelimsch is non-zero **/ /** if a preliminary schedule pass is being performed. All fragments in the **/ /** procedure are scheduled using a heuristic method designed to reduce **/ /** pipeline stalls and register interlocks. The algorithm performs a **/ /** clock-by-clock simulation of the expected behaviour of the processor as it **/ /** executes the code. We assume a serialized state upon entry to all **/ /** fragments. After scheduling we try to fill the delay slots of branches **/ /** and calls. **/ /********************************************************************************/ struct fragfmt *frag; struct instrfmt *instr; int fragnum,nextfrag,nexti; /* claim space to hold the schedule descriptors */ ClaimSchDescriptors((int)PI->procfrag,0); /* we now perform the actual scheduling */ fragnum=0; nextfrag=(int)PI->procfrag; while (nextfrag!=0) { frag=(struct fragfmt*)(nextfrag); SetVectors(fragnum+1); if ((BMachineInstr(frag->firstinstr,NEXTCHAIN)!=NULL)&& ((frag->props&((USERASSEMFRAG|DSWITCHJUMPFRAG)|NOOPTIMFRAG))==0)) { if ((CurInhibVect&1)==0) { if ((TraceEnab!=0)&&(SchReport!=0)) { printf("\n%s%s\nBASIC BLOCK SCHEDULER - FRAGMENT:%d", bannerline,bannerline,fragnum+1); printf(" [%8x]\n",nextfrag); } /* schedule the instructions in the fragment */ DoReschedFrag(nextfrag); /* now try to fill any unused delay slots in the fragment */ if ((prelimsch==0)&&((CurInhibVect&2)==0)) { /* perform if/then optimisations to improve code for */ /* processors with annulled delay slots */ PerformIfThenOpt(frag); PerformIfThenElseOpt(frag,0); PerformUncondBranchOpt(frag,0); PerformBranchOverOpt(frag,0); /* go through the fragment and attempt to fill any */ /* empty delay slots */ nexti=(int)BMachineInstr(frag->firstinstr,NEXTCHAIN); while (nexti!=0) { instr=(struct instrfmt*)(nexti); if (((instr->props&DELAYSLOTUNUSED)!=0)&&((instr->privprops&DSWITCHJUMPINSTR)==0)) { ARCHFillDelaySlot(frag,instr); } nexti=(int)BMachineInstr(instr->nextinstr,NEXTCHAIN); } } } } fragnum++; nextfrag=(int)frag->nextfrag; } SetVectors(0); /* free the space claimed to hold the schedule descriptors */ FreeSchDescriptors(); } /*SchedProc*/
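/*------------------------------------------------------------------------------*/
/* Illustrative sketch (not part of the original module): SelectInstructionGroup */
/* above pairs instructions greedily - it takes the candidate with the highest   */
/* stall factor and then looks for a partner that needs a different execution    */
/* unit, again preferring a high stall factor.  The self-contained example below */
/* restates that selection over a plain array with hypothetical names (EgCand,   */
/* EgPickPair); the Pentium pairing restrictions and the swap heuristic are      */
/* deliberately omitted.  It assumes at least one candidate is supplied.         */
/*------------------------------------------------------------------------------*/
struct EgCand {
  int unit;          /* execution unit required by the candidate                 */
  int stallfactor;   /* urgency: how soon dependent instructions would stall     */
};
static int EgPickPair(const struct EgCand *cand,int ncand,int *first,int *second)
{
  int i,f,s;
  f=0; s=-1;
  /* provisional first member: the most urgent candidate */
  for (i=1; i<ncand; i++) { if (cand[i].stallfactor>cand[f].stallfactor) f=i; }
  /* partner: the most urgent remaining candidate on a different unit */
  for (i=0; i<ncand; i++) {
    if ((i==f)||(cand[i].unit==cand[f].unit)) continue;
    if ((s<0)||(cand[i].stallfactor>cand[s].stallfactor)) s=i;
  }
  *first=f;
  if (s<0) return 1;      /* no compatible partner: issue the single instruction */
  *second=s;
  return 2;               /* a pair may be issued on the same clock              */
}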
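/*------------------------------------------------------------------------------*/
/* Illustrative sketch (not part of the original module): MoveDescBefore and     */
/* MoveDescAfter above relocate a node within the doubly linked descriptor       */
/* chain by unhooking it and relinking it next to its new neighbour.  The        */
/* self-contained example below shows the same unhook-and-relink pattern on a    */
/* minimal node type with hypothetical names (EgNode, EgUnhook, EgMoveAfter);    */
/* like the real routines it assumes 'which' and 'where' are distinct nodes.     */
/*------------------------------------------------------------------------------*/
struct EgNode { struct EgNode *prev,*next; };
/* detach 'which' from wherever it currently sits in the chain */
static void EgUnhook(struct EgNode *which)
{
  if (which->prev!=0) { which->prev->next=which->next; }
  if (which->next!=0) { which->next->prev=which->prev; }
}
/* re-insert 'which' immediately after 'where', cf. MoveDescAfter above */
static void EgMoveAfter(struct EgNode *which,struct EgNode *where)
{
  EgUnhook(which);
  which->next=where->next;
  if (where->next!=0) { where->next->prev=which; }
  where->next=which;
  which->prev=where;
}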