@@ -89,9 +89,6 @@ should go to.
 
 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
 use rustc::middle::region::CodeExtent;
-use rustc::middle::lang_items;
-use rustc::middle::const_val::ConstVal;
-use rustc::ty::subst::{Kind, Subst};
 use rustc::ty::{Ty, TyCtxt};
 use rustc::mir::*;
 use rustc::mir::transform::MirSource;
@@ -127,21 +124,6 @@ pub struct Scope<'tcx> {
     /// end of the vector (top of the stack) first.
     drops: Vec<DropData<'tcx>>,
 
-    /// A scope may only have one associated free, because:
-    ///
-    /// 1. We require a `free` to only be scheduled in the scope of
-    ///    `EXPR` in `box EXPR`;
-    /// 2. It only makes sense to have it translated into the diverge-path.
-    ///
-    /// This kind of drop will be run *after* all the regular drops
-    /// scheduled onto this scope, because drops may have dependencies
-    /// on the allocated memory.
-    ///
-    /// This is expected to go away once `box EXPR` becomes a sugar
-    /// for placement protocol and gets desugared in some earlier
-    /// stage.
-    free: Option<FreeData<'tcx>>,
-
     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
     cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>,
 }
@@ -170,22 +152,6 @@ enum DropKind {
     Storage
 }
 
-#[derive(Debug)]
-struct FreeData<'tcx> {
-    /// span where free obligation was incurred
-    span: Span,
-
-    /// Lvalue containing the allocated box.
-    value: Lvalue<'tcx>,
-
-    /// type of item for which the box was allocated for (i.e. the T in Box<T>).
-    item_ty: Ty<'tcx>,
-
-    /// The cached block containing code to run the free. The block will also execute all the drops
-    /// in the scope.
-    cached_block: Option<BasicBlock>
-}
-
 #[derive(Clone, Debug)]
 pub struct BreakableScope<'tcx> {
     /// Extent of the loop
@@ -224,9 +190,6 @@ impl<'tcx> Scope<'tcx> {
                 *cached_block = None;
             }
         }
-        if let Some(ref mut freedata) = self.free {
-            freedata.cached_block = None;
-        }
     }
 
     /// Returns the cached entrypoint for diverging exit from this scope.
@@ -242,8 +205,6 @@ impl<'tcx> Scope<'tcx> {
         });
         if let Some(cached_block) = drops.next() {
             Some(cached_block.expect("drop cache is not filled"))
-        } else if let Some(ref data) = self.free {
-            Some(data.cached_block.expect("free cache is not filled"))
         } else {
             None
         }
@@ -333,7 +294,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             extent_span: extent.1.span,
             needs_cleanup: false,
             drops: vec![],
-            free: None,
             cached_exits: FxHashMap()
         });
     }
@@ -382,7 +342,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         });
         let len = self.scopes.len();
         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
-        let tmp = self.get_unit_temp();
 
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
@@ -415,13 +374,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
             // End all regions for scopes out of which we are breaking.
             self.cfg.push_end_region(block, extent.1, scope.extent);
-
-            if let Some(ref free_data) = scope.free {
-                let next = self.cfg.start_new_block();
-                let free = build_free(self.hir.tcx(), &tmp, free_data, next);
-                self.cfg.terminate(block, scope.source_info(span), free);
-                block = next;
-            }
         }
         }
         let scope = &self.scopes[len - scope_count];
@@ -607,36 +559,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
     }
 
-    /// Schedule dropping of a not-yet-fully-initialised box.
-    ///
-    /// This cleanup will only be translated into unwind branch.
-    /// The extent should be for the `EXPR` inside `box EXPR`.
-    /// There may only be one “free” scheduled in any given scope.
-    pub fn schedule_box_free(&mut self,
-                             span: Span,
-                             extent: CodeExtent,
-                             value: &Lvalue<'tcx>,
-                             item_ty: Ty<'tcx>) {
-        for scope in self.scopes.iter_mut().rev() {
-            // See the comment in schedule_drop above. The primary difference is that we invalidate
-            // the unwind blocks unconditionally. That’s because the box free may be considered
-            // outer-most cleanup within the scope.
-            scope.invalidate_cache(true);
-            if scope.extent == extent {
-                assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                scope.needs_cleanup = true;
-                scope.free = Some(FreeData {
-                    span: span,
-                    value: value.clone(),
-                    item_ty: item_ty,
-                    cached_block: None
-                });
-                return;
-            }
-        }
-        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
-    }
-
     // Other
     // =====
     /// Creates a path that performs all required cleanup for unwinding.
@@ -650,7 +572,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         }
         assert!(!self.scopes.is_empty()); // or `any` above would be false
 
-        let unit_temp = self.get_unit_temp();
         let Builder { ref mut hir, ref mut cfg, ref mut scopes,
                       ref mut cached_resume_block, .. } = *self;
 
@@ -679,7 +600,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
         for scope in scopes.iter_mut() {
             target = build_diverge_scope(
-                hir.tcx(), cfg, &unit_temp, scope.extent_span, scope, target);
+                hir.tcx(), cfg, scope.extent_span, scope, target);
         }
         Some(target)
     }
@@ -805,9 +726,8 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
     block.unit()
 }
 
-fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+fn build_diverge_scope<'a, 'gcx, 'tcx>(_tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                        cfg: &mut CFG<'tcx>,
-                                       unit_temp: &Lvalue<'tcx>,
                                        span: Span,
                                        scope: &mut Scope<'tcx>,
                                        mut target: BasicBlock)
@@ -832,19 +752,6 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         scope: visibility_scope
     };
 
-    // Next, build up any free.
-    if let Some(ref mut free_data) = scope.free {
-        target = if let Some(cached_block) = free_data.cached_block {
-            cached_block
-        } else {
-            let into = cfg.start_new_cleanup_block();
-            cfg.terminate(into, source_info(free_data.span),
-                          build_free(tcx, unit_temp, free_data, target));
-            free_data.cached_block = Some(into);
-            into
-        };
-    }
-
     // Next, build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
@@ -888,24 +795,3 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
 
     target
 }
-
-fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
-                              unit_temp: &Lvalue<'tcx>,
-                              data: &FreeData<'tcx>,
-                              target: BasicBlock)
-                              -> TerminatorKind<'tcx> {
-    let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
-    let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]);
-    TerminatorKind::Call {
-        func: Operand::Constant(box Constant {
-            span: data.span,
-            ty: tcx.type_of(free_func).subst(tcx, substs),
-            literal: Literal::Value {
-                value: ConstVal::Function(free_func, substs),
-            }
-        }),
-        args: vec![Operand::Consume(data.value.clone())],
-        destination: Some((unit_temp.clone(), target)),
-        cleanup: None
-    }
-}
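
For context on what the deleted machinery was for (the motivation is spelled out in the removed doc comments above): `box EXPR` allocates the box before `EXPR` is evaluated, so if the initializer unwinds, the not-yet-initialised allocation must be deallocated on the diverge path without running any destructor. That is the obligation `FreeData`, `schedule_box_free`, and `build_free` encoded as a call to the `box_free` lang item. The sketch below restates the same obligation in ordinary library Rust, outside the compiler; `box_with_cleanup` and its use of `std::alloc`/`catch_unwind` are illustrative assumptions for exposition, not how rustc lowers `box` either before or after this commit.

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
use std::ptr;

/// Sketch of the `box EXPR` cleanup obligation: allocate first, evaluate the
/// initializer, and free the raw allocation (without running any destructor)
/// if the initializer panics -- the "unwind path" cleanup the removed code
/// emitted as a call to the `box_free` lang item.
fn box_with_cleanup<T>(init: impl FnOnce() -> T) -> Box<T> {
    let layout = Layout::new::<T>();
    assert!(layout.size() > 0, "sketch ignores zero-sized types for brevity");
    let raw = unsafe { alloc(layout) } as *mut T;
    if raw.is_null() {
        handle_alloc_error(layout);
    }
    match catch_unwind(AssertUnwindSafe(init)) {
        Ok(value) => unsafe {
            ptr::write(raw, value); // memory is now initialised
            Box::from_raw(raw)      // normal path: Box takes ownership
        },
        Err(payload) => unsafe {
            // Diverge path: the memory was never initialised, so only the
            // allocation itself is freed before the panic continues.
            dealloc(raw as *mut u8, layout);
            resume_unwind(payload)
        },
    }
}

fn main() {
    let b = box_with_cleanup(|| 42u32);
    assert_eq!(*b, 42);
}
```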