Chromium Code Reviews
Unified diff: runtime/vm/flow_graph_allocator.h

Issue 10800037: New linear scan allocator. (Closed) Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: address Kevin's comments · Created 8 years, 4 months ago
 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #ifndef VM_FLOW_GRAPH_ALLOCATOR_H_
 #define VM_FLOW_GRAPH_ALLOCATOR_H_

 #include "vm/growable_array.h"
 #include "vm/intermediate_language.h"

 namespace dart {

+class AllocationFinger;
 class FlowGraphBuilder;
 class LiveRange;
 class UseInterval;
+class UsePosition;

 class FlowGraphAllocator : public ValueObject {
  public:
   FlowGraphAllocator(const GrowableArray<BlockEntryInstr*>& block_order,
                      FlowGraphBuilder* builder);

   void AllocateRegisters();

   // Build live-in and live-out sets for each block.
   void AnalyzeLiveness();
 (15 unchanged lines skipped)

   // Perform fix-point iteration updating live-out and live-in sets
   // for blocks until they stop changing.
   void ComputeLiveInAndLiveOutSets();

   // Print results of liveness analysis.
   void DumpLiveness();

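The fix-point computed by ComputeLiveInAndLiveOutSets is the textbook backward liveness dataflow: live_out(B) is the union of live_in(S) over B's successors, and live_in(B) = use(B) ∪ (live_out(B) − kill(B)). A minimal standalone sketch of that iteration, using illustrative std:: containers rather than the VM's BitVector API:

    #include <set>
    #include <vector>

    // Iterate until the live-in/live-out sets stop changing (a fix-point).
    void ComputeLiveness(const std::vector<std::vector<int> >& succ,
                         const std::vector<std::set<int> >& use,
                         const std::vector<std::set<int> >& kill,
                         std::vector<std::set<int> >* live_in,
                         std::vector<std::set<int> >* live_out) {
      const size_t n = succ.size();
      live_in->assign(n, std::set<int>());
      live_out->assign(n, std::set<int>());
      bool changed = true;
      while (changed) {
        changed = false;
        for (size_t b = 0; b < n; ++b) {
          // live_out(b) = union of live_in over b's successors.
          std::set<int> out;
          for (size_t i = 0; i < succ[b].size(); ++i) {
            const std::set<int>& in_s = (*live_in)[succ[b][i]];
            out.insert(in_s.begin(), in_s.end());
          }
          // live_in(b) = use(b) + (live_out(b) - kill(b)).
          std::set<int> in = use[b];
          for (std::set<int>::const_iterator it = out.begin();
               it != out.end(); ++it) {
            if (kill[b].count(*it) == 0) in.insert(*it);
          }
          if (out != (*live_out)[b] || in != (*live_in)[b]) {
            (*live_out)[b] = out;
            (*live_in)[b] = in;
            changed = true;
          }
        }
      }
    }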
   // Visit blocks in the code generation order (reverse post order) and
   // linearly assign consecutive lifetime positions to every instruction.
-  // Each instruction gets two positions:
+  // We assign positions as follows:
   //
-  //   2 * n     - even one corresponding to instruction's start
+  //   2 * n     - even position corresponding to an implicit parallel move
+  //               preceding the instruction;
   //
-  //   2 * n + 1 - odd one corresponding to instruction's end
+  //   2 * n + 1 - odd position corresponding to the instruction itself;
   //
-  // Having two positions allows us to capture non-trivial register
-  // constraints in use intervals: for example we can declare that
-  // an input value is only used at the start of the instruction and
-  // this might allow register allocator to allocate both this input
-  // and output (or temp) to the same register if this is the last
-  // use of the value.
+  // Having positions corresponding to parallel moves between every two
+  // instructions allows us to capture non-trivial shapes of use intervals.
+  // For specific examples see comments inside ProcessOneInstruction.
   // Additionally creates parallel moves at the joins' predecessors
   // that will be used for phi resolution.
   void NumberInstructions();
+  Instruction* InstructionAt(intptr_t pos) const;
+  bool IsBlockEntry(intptr_t pos) const;

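To make the numbering concrete: the k-th instruction in the linear order (counting from 0) owns the odd position 2k + 1, and the even position 2k belongs to the implicit parallel move in front of it, so a block with instructions i0, i1, i2 is numbered move:0, i0:1, move:2, i1:3, move:4, i2:5. A couple of illustrative helpers (hypothetical names, not part of this header):

    // Position layout: ... [2k: move] [2k+1: instruction k] [2k+2: move] ...
    inline intptr_t MovePositionBefore(intptr_t k) { return 2 * k; }
    inline intptr_t InstructionPosition(intptr_t k) { return 2 * k + 1; }
    inline bool IsParallelMovePosition(intptr_t pos) { return (pos & 1) == 0; }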
   LiveRange* GetLiveRange(intptr_t vreg);
+
+  // Visit instructions in the postorder and build live ranges for
+  // all SSA values.
   void BuildLiveRanges();
-  void PrintLiveRanges();
+  Instruction* ConnectOutgoingPhiMoves(BlockEntryInstr* block);
+  void ProcessOneInstruction(BlockEntryInstr* block, Instruction* instr);
+  void ConnectIncomingPhiMoves(BlockEntryInstr* block);
+  void BlockLocation(Location loc, intptr_t from, intptr_t to);

-  // Register use of the given virtual register at lifetime position use_pos.
-  // If definition position is unknown then start of the block containing
-  // use_pos will be passed.
-  void UseValue(Instruction* instr,
-                intptr_t def_pos,  // Lifetime position for the definition.
-                intptr_t use_pos,  // Lifetime position for the use.
-                intptr_t vreg,
-                Location* loc,
-                bool use_at_end);
-
-  // Register definition of the given virtual register at lifetime position
-  // def_pos. Existing use interval will be shortened to start at def_pos.
-  void Define(Instruction* instr,
-              intptr_t def_pos,
-              intptr_t vreg,
-              Location* loc);
-
-  void AddToUnallocated(UseInterval* chain);
-  void BlockLocation(Location loc, intptr_t pos);
-
-  bool AllocateFreeRegister(UseInterval* unallocated);
-  void AssignFreeRegister(UseInterval* unallocated, Register reg);
-
-  void FinalizeInterval(UseInterval* interval, Location loc);
+  // Process live ranges sorted by their start and assign registers to them.
+  void AllocateCPURegisters();
   void AdvanceActiveIntervals(const intptr_t start);

+  // Connect split siblings over non-linear control flow edges.
+  void ResolveControlFlow();
+  void ConnectSplitSiblings(LiveRange* range,
+                            BlockEntryInstr* source_block,
+                            BlockEntryInstr* target_block);
+
+  // Update the location slot corresponding to the use with the location
+  // allocated for the use's live range.
+  void ConvertUseTo(UsePosition* use, Location loc);
+  void ConvertAllUses(LiveRange* range);
+
+  // Add a live range to the list of unallocated live ranges to be processed
+  // by the allocator.
+  void AddToUnallocated(LiveRange* range);
+#ifdef DEBUG
   bool UnallocatedIsSorted();
-  void AllocateCPURegisters();
+#endif
+
+  // Try to find a free register for an unallocated live range.
+  bool AllocateFreeRegister(LiveRange* unallocated);
+
+  // Try to find a register that can be used by the given live range.
+  // If all registers are occupied, consider evicting interference for
+  // a register that is going to be used as far from the start of
+  // the unallocated live range as possible.
+  void AllocateAnyRegister(LiveRange* unallocated);
+
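The two methods above are the heart of the linear scan loop: ranges leave the sorted worklist lowest-start-first, each first tries a register that is free for its whole extent, and only then falls back to eviction. A rough standalone sketch of that driver under assumed helper names (the authoritative loop is AllocateCPURegisters in flow_graph_allocator.cc):

    #include <vector>

    struct Range { int start; };             // stand-in for LiveRange

    bool TryAllocateFreeRegister(Range* r);  // assumed helpers, declared only
    void AllocateWithEviction(Range* r);
    void RetireActiveRangesEndingBefore(int pos);

    // The worklist is kept sorted so the range with the smallest start
    // position is processed first (cf. ShouldBeAllocatedBefore).
    void LinearScan(std::vector<Range*>* unallocated) {
      while (!unallocated->empty()) {
        Range* range = unallocated->back();  // smallest start kept at the back
        unallocated->pop_back();
        RetireActiveRangesEndingBefore(range->start);
        if (!TryAllocateFreeRegister(range)) {
          AllocateWithEviction(range);       // split/spill interfering ranges
        }
      }
    }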
+  // Assign the selected non-free register to an unallocated live range and
+  // evict any interference that can be evicted by splitting and spilling
+  // parts of interfering live ranges. Place non-spilled parts into
+  // the list of unallocated ranges.
+  void AssignNonFreeRegister(LiveRange* unallocated, Register reg);
+  bool EvictIntersection(LiveRange* allocated, LiveRange* unallocated);
+  void RemoveEvicted(Register reg, intptr_t first_evicted);
+
+  // Find the first intersection between the unallocated live range and
+  // the live ranges currently allocated to the given register.
+  intptr_t FirstIntersectionWithAllocated(Register reg,
+                                          LiveRange* unallocated);
+
+  bool UpdateFreeUntil(Register reg,
+                       LiveRange* unallocated,
+                       intptr_t* cur_free_until,
+                       intptr_t* cur_blocked_at);
+
+  // Split the given live range at an optimal position between the given
+  // positions.
+  LiveRange* SplitBetween(LiveRange* range, intptr_t from, intptr_t to);
+
+  // Find a spill slot that can be used by the given live range.
+  intptr_t AllocateSpillSlotFor(LiveRange* range);
+
+  // Allocate the given live range to a spill slot.
+  void Spill(LiveRange* range);
+
+  // Spill the given live range from the given position onwards.
+  void SpillAfter(LiveRange* range, intptr_t from);
+
+  // Spill the given live range from the given position until some
+  // position preceding the to position.
+  void SpillBetween(LiveRange* range, intptr_t from, intptr_t to);
+
+  MoveOperands* AddMoveAt(intptr_t pos, Location to, Location from);
+
+  void PrintLiveRanges();

   // TODO(vegorov): this field is used only to call Bailout. Remove when
   // all bailouts are gone.
   FlowGraphBuilder* builder_;

   const GrowableArray<BlockEntryInstr*>& block_order_;
   const GrowableArray<BlockEntryInstr*>& postorder_;

+  GrowableArray<Instruction*> instructions_;
+
   // Live-out sets for each block. They contain indices of SSA values
   // that are live out from this block: that is, values that were either
   // defined in this block or live into it, and that are used in some
   // successor block.
   GrowableArray<BitVector*> live_out_;

   // Kill sets for each block. They contain indices of SSA values that
   // are defined by this block.
   GrowableArray<BitVector*> kill_;

   // Live-in sets for each block. They contain indices of SSA values
   // that are used by this block or its successors.
   GrowableArray<BitVector*> live_in_;

   // Number of virtual registers. Currently equal to the number of
   // SSA values.
   const intptr_t vreg_count_;

   // LiveRanges corresponding to SSA values.
   GrowableArray<LiveRange*> live_ranges_;

   // Worklist for the register allocator. Always maintained sorted according
   // to the ShouldBeAllocatedBefore predicate.
-  GrowableArray<UseInterval*> unallocated_;
+  GrowableArray<LiveRange*> unallocated_;

-  // Per register lists of allocated UseIntervals, linked through
-  // next_allocated field. Contains only those intervals that
-  // can be affected by future allocation decisions. Those intervals
-  // that end before the start of the current UseInterval are removed
-  // from this list and will not be affected.
-  UseInterval* cpu_regs_[kNumberOfCpuRegisters];
+  // Per-register lists of allocated live ranges. They contain only those
+  // ranges that can be affected by future allocation decisions: live ranges
+  // that end before the start of the current live range are removed
+  // from the list and will not be affected.
+  GrowableArray<LiveRange*> cpu_regs_[kNumberOfCpuRegisters];
+
+  // List of used spill slots. Contains positions after which spill slots
+  // become free and can be reused for allocation.
+  GrowableArray<intptr_t> spill_slots_;
+
+  bool blocked_cpu_regs_[kNumberOfCpuRegisters];

   DISALLOW_COPY_AND_ASSIGN(FlowGraphAllocator);
 };


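Taken together, the class reads as a five-phase pipeline. An editorial guess at the order in which AllocateRegisters sequences these phases, based only on the declarations above (the authoritative driver lives in flow_graph_allocator.cc):

    // Editorial guess, shown as an outline rather than compilable code:
    //
    //   AnalyzeLiveness();        // live_in_/live_out_/kill_ per block
    //   NumberInstructions();     // positions 2n (moves), 2n+1 (instructions)
    //   BuildLiveRanges();        // one LiveRange per SSA value, with uses
    //   AllocateCPURegisters();   // linear scan over the sorted worklist
    //   ResolveControlFlow();     // connect split siblings across CFG edges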
 // UsePosition represents a single use of an SSA value by some instruction.
 // It points to a location slot, which either tells the register allocator
 // where the instruction expects the value (if the slot contains a fixed
 // location) or asks the register allocator to allocate storage (a register
 // or spill slot) for this use with certain properties (if the slot contains
 // an unallocated location).
 class UsePosition : public ZoneAllocated {
  public:
-  enum UseFlag {
-    kNoFlag = 0,
-    kFixedUse = 1,
-    kSameAsFirstUse = 2,
-    kOther = 3
-  };
-
-  static const intptr_t kUseFlagMask = 0x3;
-  static const intptr_t kPositionShift = 2;
-
-  static UseFlag FlagForUse(const Location& loc) {
-    if (loc.IsRegister()) return kFixedUse;
-    if (loc.IsUnallocated() && (loc.policy() == Location::kSameAsFirstInput)) {
-      return kSameAsFirstUse;
-    }
-    return kOther;
-  }
-
-  // TODO(vegorov): we encode either position or instruction pointer
-  // into the pos_ field to generate moves when needed to resolve
-  // fixed or same-as-first constraints, but this looks ugly.
-  UsePosition(Instruction* instr,
-              intptr_t pos,
+  UsePosition(intptr_t pos,
               UsePosition* next,
               Location* location_slot)
-      : pos_(pos << kPositionShift),
+      : pos_(pos),
         location_slot_(location_slot),
         next_(next) {
-    // Non-NULL instr is considered unlikely so we preinitialize pos_ field
-    // with an encoded position even if instr is not NULL.
-    if (instr != NULL) {
-      ASSERT(location_slot_ != NULL);
-      pos_ = reinterpret_cast<intptr_t>(instr) | FlagForUse(*location_slot_);
-    }
-    ASSERT(this->pos() == pos);
   }

-  // Tell the use that it should load the value from the given location.
-  // If location slot for the use is flexible (unallocated) it will be updated
-  // with the given location. Otherwise a move will be scheduled from the given
-  // location to the location already stored in the slot.
-  void AssignLocation(Location loc);
-
   Location* location_slot() const { return location_slot_; }
   void set_location_slot(Location* location_slot) {
     location_slot_ = location_slot;
   }

   void set_next(UsePosition* next) { next_ = next; }
   UsePosition* next() const { return next_; }

-  intptr_t pos() const {
-    if ((pos_ & kUseFlagMask) != kNoFlag) {
-      return instr()->lifetime_position();
-    }
-    return pos_ >> kPositionShift;
-  }
-
-  Instruction* instr() const {
-    ASSERT((pos_ & kUseFlagMask) != kNoFlag);
-    return reinterpret_cast<Instruction*>(pos_ & ~kUseFlagMask);
-  }
+  intptr_t pos() const { return pos_; }

   bool HasHint() const {
-    return (pos_ & kUseFlagMask) == kFixedUse;
+    return (location_slot() != NULL) && (location_slot()->IsRegister());
   }

   Location hint() const {
     ASSERT(HasHint());
-    ASSERT(location_slot()->IsRegister());
-    return *location_slot_;
+    return *location_slot();
   }

  private:
-  intptr_t pos_;
+  const intptr_t pos_;
   Location* location_slot_;
   UsePosition* next_;
+
+  DISALLOW_COPY_AND_ASSIGN(UsePosition);
 };


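How the slot gets filled in: once allocation picks a location for a live range, ConvertUseTo (declared in FlowGraphAllocator above) rewrites each use's slot so the instruction reads from the assigned location. A plausible sketch of that update, written as a hypothetical free function over the header's types (the real logic lives in flow_graph_allocator.cc):

    // Overwrite the use's flexible (unallocated) slot with the location
    // the allocator chose for the corresponding live range.
    void ConvertUseToLocation(UsePosition* use, Location loc) {
      ASSERT(use->location_slot() != NULL);
      *use->location_slot() = loc;  // the instruction now reads from `loc`
    }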
 // UseInterval represents a holeless half-open interval of liveness for a
 // given SSA value: [start, end) in terms of the lifetime positions that
 // NumberInstructions assigns to instructions. The register allocator has to
 // keep the value live in a register or in a spill slot from the start
 // position until the end position. The interval can cover zero or more uses.
-// During the register allocation UseIntervals from different live ranges
-// allocated to the same register will be chained together through
-// next_allocated_ field.
 // Note: currently all uses of the same SSA value are linked together into a
 // single list (and not split between UseIntervals).
 class UseInterval : public ZoneAllocated {
  public:
-  UseInterval(intptr_t vreg, intptr_t start, intptr_t end, UseInterval* next)
-      : vreg_(vreg),
-        start_(start),
+  UseInterval(intptr_t start, intptr_t end, UseInterval* next)
+      : start_(start),
         end_(end),
-        uses_((next == NULL) ? NULL : next->uses_),
-        next_(next),
-        next_allocated_(next) { }
+        next_(next) { }

-
-  void AddUse(Instruction* instr, intptr_t pos, Location* loc);
   void Print();

-  intptr_t vreg() const { return vreg_; }
   intptr_t start() const { return start_; }
   intptr_t end() const { return end_; }
-  UsePosition* first_use() const { return uses_; }
   UseInterval* next() const { return next_; }

   bool Contains(intptr_t pos) const {
     return (start() <= pos) && (pos < end());
   }

   // Returns the smallest position that is covered by both UseIntervals, or
   // kIllegalPosition if the intervals do not intersect.
   intptr_t Intersect(UseInterval* other);
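Because the intervals are half-open, the intersection test reduces to comparing the larger of the two starts against the smaller of the two ends. A minimal sketch of what Intersect computes, with kIllegalPosition assumed to be a negative sentinel:

    // First position covered by both [a_start, a_end) and [b_start, b_end),
    // or a sentinel when the intervals are disjoint.
    intptr_t FirstCommonPosition(intptr_t a_start, intptr_t a_end,
                                 intptr_t b_start, intptr_t b_end) {
      const intptr_t kIllegalPosition = -1;  // assumed sentinel value
      const intptr_t start = (a_start > b_start) ? a_start : b_start;
      const intptr_t end = (a_end < b_end) ? a_end : b_end;
      return (start < end) ? start : kIllegalPosition;
    }

For example, [2, 8) and [6, 12) first intersect at 6, while [2, 6) and [6, 12) do not intersect at all, because position 6 is excluded from the first interval.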

-  UseInterval* Split(intptr_t pos);
-
-  void set_next_allocated(UseInterval* next_allocated) {
-    next_allocated_ = next_allocated;
-  }
-  UseInterval* next_allocated() const { return next_allocated_; }
-
  private:
   friend class LiveRange;
-  const intptr_t vreg_;

   intptr_t start_;
   intptr_t end_;
+  UseInterval* next_;

-  UsePosition* uses_;
-
-  UseInterval* next_;
-  UseInterval* next_allocated_;
+  DISALLOW_COPY_AND_ASSIGN(UseInterval);
 };


+// AllocationFinger keeps track of the currently active position for the
+// register allocator and caches lookup results.
+class AllocationFinger : public ValueObject {
+ public:
+  AllocationFinger()
+      : first_pending_use_interval_(NULL),
+        first_register_use_(NULL),
+        first_register_beneficial_use_(NULL),
+        first_hinted_use_(NULL) {
+  }
+
+  void Initialize(LiveRange* range);
+  bool Advance(intptr_t start);
+
+  UseInterval* first_pending_use_interval() const {
+    return first_pending_use_interval_;
+  }
+
+  Location FirstHint();
+  UsePosition* FirstRegisterUse(intptr_t after_pos);
+  UsePosition* FirstRegisterBeneficialUse(intptr_t after_pos);
+
+ private:
+  UseInterval* first_pending_use_interval_;
+  UsePosition* first_register_use_;
+  UsePosition* first_register_beneficial_use_;
+  UsePosition* first_hinted_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationFinger);
+};
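The finger exists so that queries against a long range are amortized: instead of rescanning from the first interval on every lookup, the allocator advances a cursor monotonically as its scan position grows. A sketch of the idea behind Advance, with the return convention assumed rather than taken from the source:

    // Skip pending intervals that end at or before `start`; afterwards
    // *first_pending points at the first interval still relevant.
    bool AdvanceFingerSketch(UseInterval** first_pending, intptr_t start) {
      UseInterval* a = *first_pending;
      while ((a != NULL) && (a->end() <= start)) a = a->next();
      *first_pending = a;
      return a == NULL;  // assumed: true once the range is exhausted
    }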


 // LiveRange represents a sequence of UseIntervals for a given SSA value.
-// TODO(vegorov): this class is actually redundant currently.
 class LiveRange : public ZoneAllocated {
  public:
-  explicit LiveRange(intptr_t vreg) : vreg_(vreg), head_(NULL) { }
+  explicit LiveRange(intptr_t vreg)
+      : vreg_(vreg),
+        uses_(NULL),
+        first_use_interval_(NULL),
+        last_use_interval_(NULL),
+        next_sibling_(NULL),
+        finger_() {
+  }

-  void DefineAt(Instruction* instr, intptr_t pos, Location* loc);
+  static LiveRange* MakeTemp(intptr_t pos, Location* location_slot);

-  void UseAt(Instruction* instr,
-             intptr_t def_pos,
-             intptr_t use_pos,
-             bool use_at_end,
-             Location* loc);
+  intptr_t vreg() const { return vreg_; }
+  LiveRange* next_sibling() const { return next_sibling_; }
+  UsePosition* first_use() const { return uses_; }
+  void set_first_use(UsePosition* use) { uses_ = use; }
+  UseInterval* first_use_interval() const { return first_use_interval_; }
+  UseInterval* last_use_interval() const { return last_use_interval_; }
+  Location assigned_location() const { return assigned_location_; }
+  intptr_t Start() const { return first_use_interval()->start(); }
+  intptr_t End() const { return last_use_interval()->end(); }

+  AllocationFinger* finger() { return &finger_; }
+
+  void set_assigned_location(Location location) {
+    assigned_location_ = location;
+  }
+
+  void DefineAt(intptr_t pos);
+
+  void AddUse(intptr_t pos, Location* location_slot);
   void AddUseInterval(intptr_t start, intptr_t end);

   void Print();

-  UseInterval* head() const { return head_; }
+  void AssignLocation(UseInterval* use, Location loc);
+
+  LiveRange* SplitAt(intptr_t pos);
+
+  bool CanCover(intptr_t pos) const {
+    return (Start() <= pos) && (pos < End());
+  }

  private:
+  LiveRange(intptr_t vreg,
+            UsePosition* uses,
+            UseInterval* first_use_interval,
+            UseInterval* last_use_interval,
+            LiveRange* next_sibling)
+      : vreg_(vreg),
+        uses_(uses),
+        first_use_interval_(first_use_interval),
+        last_use_interval_(last_use_interval),
+        next_sibling_(next_sibling),
+        finger_() {
+  }
+
   const intptr_t vreg_;
-  UseInterval* head_;
+  Location assigned_location_;
+
+  UsePosition* uses_;
+  UseInterval* first_use_interval_;
+  UseInterval* last_use_interval_;
+
+  LiveRange* next_sibling_;
+
+  AllocationFinger finger_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
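SplitAt is what ties LiveRange to ResolveControlFlow above: splitting at position p produces a sibling covering [p, End()) chained through next_sibling_, and moves are inserted wherever control flow passes between siblings assigned different locations. Finding the sibling that holds a value at a given position is then just a walk of that chain (hypothetical helper, not part of this header):

    // Walk the split-sibling chain to the piece whose [Start(), End())
    // envelope contains `pos` (holes inside the envelope are not checked
    // here); returns NULL if `pos` lies past every sibling.
    LiveRange* SiblingCovering(LiveRange* range, intptr_t pos) {
      for (LiveRange* r = range; r != NULL; r = r->next_sibling()) {
        if (r->CanCover(pos)) return r;
      }
      return NULL;
    }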


 }  // namespace dart

 #endif  // VM_FLOW_GRAPH_ALLOCATOR_H_