KJS

collector.cpp
/*
 * This file is part of the KDE libraries
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Computer, Inc.
 * Copyright (C) 2007 Eric Seidel <[email protected]>
 * Copyright (C) 2007 Maksim Orlovich <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "collector.h"

#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include "internal.h"
#include "list.h"
#include "value.h"

#include <setjmp.h>
#include <limits.h>
#include <algorithm>
#if PLATFORM(DARWIN)

#include <pthread.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)

#include <windows.h>
#include <winnt.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h> //krazy:exclude=includes (yes, it's a duplicate, but in a different #if branch)
#include <unistd.h>

#if PLATFORM(SOLARIS_OS)
#include <thread.h>
#include <signal.h>
using std::memset;
#endif

#if HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif

#endif
#define DEBUG_COLLECTOR 0

#if HAVE_VALGRIND_MEMCHECK_H && !defined(NDEBUG)
#include <valgrind/memcheck.h>
#if defined(VALGRIND_MAKE_MEM_DEFINED)
#define VG_DEFINED(p) VALGRIND_MAKE_MEM_DEFINED(&p, sizeof(void*))
#else
#define VG_DEFINED(p)
#endif
#else
#define VG_DEFINED(p)
#endif

using std::max;

namespace KJS
{

// tunable parameters
const size_t SPARE_EMPTY_BLOCKS = 2;
const size_t MIN_ARRAY_SIZE = 14;
const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000;
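// How these are used below: BlockList::append() grows its array geometrically
// (MIN_ARRAY_SIZE entries to start, then *= GROWTH_FACTOR); Collector::allocate()
// triggers a collection once the number of new values since the last collection
// (plus any recorded extra cost) reaches ALLOCATIONS_PER_COLLECTION and is at
// least the live count at the last collection; and collect() keeps at most
// SPARE_EMPTY_BLOCKS fully-empty blocks around instead of returning them to the
// OS. LOW_WATER_FACTOR is not referenced in this file.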

// A wee bit like a WTF::Vector, but perfectly POD, so it can be used in a
// global context without worries about static construction order.
struct BlockList {
    CollectorBlock **m_data;
    size_t m_used;
    size_t m_capacity;

    CollectorBlock *operator[](size_t pos)
    {
        return m_data[pos];
    }

    size_t used() const
    {
        return m_used;
    }

    void append(CollectorBlock *block)
    {
        if (m_used == m_capacity) {
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock *) / GROWTH_FACTOR;
            if (m_capacity > maxNumBlocks) {
                CRASH();
            }
            m_capacity = max(MIN_ARRAY_SIZE, m_capacity * GROWTH_FACTOR);
            m_data = static_cast<CollectorBlock **>(fastRealloc(m_data, m_capacity * sizeof(CollectorBlock *)));
        }
        m_data[m_used] = block;
        ++m_used;
    }

    void remove(CollectorBlock *block)
    {
        size_t c;
        for (c = 0; c < m_used; ++c)
            if (m_data[c] == block) {
                break;
            }

        if (c == m_used) {
            return;
        }

        // Move things up, and shrink..
        --m_used;
        for (; c < m_used; ++c) {
            m_data[c] = m_data[c + 1];
        }
    }
};
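
// Note: BlockList::remove() is a linear scan plus shift. That is fine here,
// since a heap has relatively few blocks and removal only happens when a
// block is returned to the OS (see freeBlock()).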

struct CollectorHeap {
    // This contains the list of both normal and oversize blocks
    BlockList allBlocks;

    // Just the normal blocks
    BlockList blocks;

    size_t firstBlockWithPossibleSpace;

    // The oversize block handling is a bit tricky, since we do not wish to slow down
    // the usual paths. Hence, we do the following:
    // 1) We set the marked bits for any extension portions of the block.
    //    This way the stack GC doesn't have to do anything special if a pointer
    //    happens to go that way.
    // 2) We keep track of an allBlocks list to help the stack GC test things.
    //
    // The oversize heap itself represents blocks as follows:
    // 1) free: marked = false, allocd = false, trailer = false, zeroIfFree = 0
    // 2) alloc'd, head: marked = depends, allocd = true, trailer = false, zeroIfFree is uncertain
    // 3) alloc'd, trailer: marked = true (see above), allocd = true, trailer = true, zeroIfFree is uncertain
    //
    // For now, we don't use a free list, so performance may be quite bad if
    // this is used heavily; that's because cells don't have back and forward
    // links, which we would need since allocation can pick a non-first cell.
    // ### actually, it may be possible to shuffle the list. Not now, though
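    //
    // Example: after allocOversize() places a three-cell object at offset k,
    // the bitmaps read: allocd set for k..k+2; trailer and marked set for
    // k+1 and k+2 only (the head's mark bit is left to the collector).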
    BlockList oversizeBlocks;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;
};

static CollectorHeap heap;

bool Collector::memoryFull = false;

static CollectorBlock *allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)
    // Windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE_FUNC_POSIX_MEMALIGN
    void *address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(reinterpret_cast<void *>(address), 0, BLOCK_SIZE);
#else
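    // Generic POSIX path: mmap() only guarantees page alignment, but the
    // conservative scanner needs blocks aligned to BLOCK_SIZE so that any
    // interior pointer can be mapped back to its block by masking with
    // BLOCK_OFFSET_MASK (see markStackObjectsConservatively()). So we
    // over-allocate by (BLOCK_SIZE - pagesize), then munmap the misaligned
    // head and the leftover tail. E.g. if BLOCK_SIZE were 16K with 4K pages,
    // an mmap() result of 0x5000 would be trimmed to the aligned block at 0x8000.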
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize) {
        extra = BLOCK_SIZE - pagesize;
    }

    void *mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0) {
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
    }

    if (adjust > 0) {
        munmap(reinterpret_cast<void *>(address), adjust);
    }

    if (adjust < extra) {
        munmap(reinterpret_cast<void *>(address + adjust + BLOCK_SIZE), extra - adjust);
    }

    address += adjust;
    memset(reinterpret_cast<void *>(address), 0, BLOCK_SIZE);
#endif
    CollectorBlock *ptr = reinterpret_cast<CollectorBlock *>(address);
    heap.allBlocks.append(ptr); // Register with the heap
    return ptr;
}

static void freeBlock(CollectorBlock *block)
{
    // Unregister the block first
    heap.allBlocks.remove(block);

#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE_FUNC_POSIX_MEMALIGN
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}

void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    heap.extraCost += cost;
}

static void *allocOversize(size_t s)
{
    size_t cellsNeeded = (s + (CELL_SIZE - 1)) / CELL_SIZE;
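    // Round the request up to whole cells: e.g. if CELL_SIZE were 64 bytes,
    // a 100-byte request would need ceil(100/64) = 2 cells.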

    // printf("allocOversize, size:%d, cellsNeeded:%d\n", s, cellsNeeded);

    // We are not oversize enough to deal with things close to 64K in size
    assert(cellsNeeded <= CELLS_PER_BLOCK);

    // Look through the blocks, see if one has enough, and where.
    CollectorBlock *sufficientBlock = nullptr;
    size_t startOffset = -1;
    for (size_t b = 0; b < heap.oversizeBlocks.used() && !sufficientBlock; ++b) {
        CollectorBlock *candidate = heap.oversizeBlocks[b];
        if (cellsNeeded <= CELLS_PER_BLOCK - candidate->usedCells) {
            // Well, there is a chance we will fit... Let's see if we can find a run of free cells long enough.
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (i % 32 == 0 && candidate->allocd.bits[i / 32] == 0xFFFFFFFF) {
                    // Can skip this and 31 other cells
                    i += 31;
                    continue;
                }

                if (candidate->allocd.get(i)) {
                    continue;
                }

                // This cell is free -- are there enough free cells after it?
                startOffset = i;
                size_t last = i + cellsNeeded - 1;

                if (last >= CELLS_PER_BLOCK) { // No way it will fit
                    break;
                }

                ++i;
                while (i <= last && !candidate->allocd.get(i)) {
                    ++i;
                }

                // Did we get through enough?
                if (i == last + 1) {
                    sufficientBlock = candidate;
                    break;
                }

                // Not enough room: the cell at i is allocated, so the scan
                // resumes at i + 1 on the next iteration.

            } // for each cell per block.
        } // if enough room in block
    } // for each block

    if (!sufficientBlock) {
        sufficientBlock = allocateBlock();
        startOffset = 0;
        heap.oversizeBlocks.append(sufficientBlock);
    }

    sufficientBlock->usedCells += cellsNeeded;

    // Set proper bits for trailers and the head
    sufficientBlock->allocd.set(startOffset);
    for (size_t t = startOffset + 1; t < startOffset + cellsNeeded; ++t) {
        sufficientBlock->trailer.set(t);
        sufficientBlock->marked.set(t);
        sufficientBlock->allocd.set(t);
    }

    void *result = sufficientBlock->cells + startOffset;
    memset(result, 0, s);
    heap.numLiveObjects = heap.numLiveObjects + 1;
    return result;
}

void *Collector::allocate(size_t s)
{
    assert(JSLock::lockCount() > 0);

    // collect if needed
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    size_t newCost = numNewObjects + heap.extraCost;

    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        collect();
        numLiveObjects = heap.numLiveObjects;
    }

    if (s > CELL_SIZE) {
        return allocOversize(s);
    }

    // slab allocator

    size_t usedBlocks = heap.blocks.used();

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks) {
                goto allocateNewBlock;
            }
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
allocateNewBlock:
        // didn't find one, need to allocate a new block
        targetBlock = allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks.append(targetBlock);
        heap.firstBlockWithPossibleSpace = usedBlocks; // previous usedBlocks -> new one's index
    }

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);
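    // E.g. a freshly zeroed cell at address A has next == 0, so its successor
    // is A + sizeof(CollectorCell): the free list threads linearly through a
    // new block without any per-cell initialization.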

    targetBlock->usedCells = targetBlockUsedCells + 1;
    heap.numLiveObjects = numLiveObjects + 1;

    return newCell;
}

#if USE(MULTIPLE_THREADS)

struct Collector::Thread {
    Thread(pthread_t pthread, mach_port_t mthread) : posixThread(pthread), machThread(mthread) {}
    Thread *next;
    pthread_t posixThread;
    mach_port_t machThread;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;

Collector::Thread *registeredThreads;

static void destroyRegisteredThread(void *data)
{
    Collector::Thread *thread = (Collector::Thread *)data;

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread *last = registeredThreads;
        for (Collector::Thread *t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
    }

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        pthread_t pthread = pthread_self();
        WTF::fastMallocRegisterThread(pthread);
        Collector::Thread *thread = new Collector::Thread(pthread, pthread_mach_thread_np(pthread));
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)

void Collector::markStackObjectsConservatively(void *start, void *end)
{
    if (start > end) {
        void *tmp = start;
        start = end;
        end = tmp;
    }

    assert(((char *)end - (char *)start) < 0x1000000);
    assert(IS_POINTER_ALIGNED(start));
    assert(IS_POINTER_ALIGNED(end));

    char **p = (char **)start;
    char **e = (char **)end;

    // We use allBlocks here since we want to mark oversize cells as well.
    // Their trailers will have the mark bit set already, to avoid trouble.
    size_t usedBlocks = heap.allBlocks.used();
    CollectorBlock **blocks = heap.allBlocks.m_data;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
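    // A stack word is treated as a cell pointer only if it passes three cheap
    // tests: it is cell-aligned, masking off the low bits yields a block we
    // actually allocated (the allBlocks scan below), and its offset falls
    // within the block's cell array rather than the trailing bitmaps and
    // bookkeeping fields.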

    while (p != e) {
        char *x = *p++;
        VG_DEFINED(x);
        if (IS_CELL_ALIGNED(x) && x) {
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock *blockAddr = reinterpret_cast<CollectorBlock *>(x - offset);
            for (size_t block = 0; block < usedBlocks; block++) {
                if ((blocks[block] == blockAddr) && (offset <= lastCellOffset)) {
                    if (((CollectorCell *)x)->u.freeCell.zeroIfFree != nullptr) {
                        JSCell *imp = reinterpret_cast<JSCell *>(x);
                        if (!imp->marked()) {
                            imp->mark();
                        }
                    }
                } // if valid block
            } // for each block
        } // if cell-aligned
    } // for each pointer
}

static inline void *currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    void *stackBase = pthread_get_stackaddr_np(thread);
#elif (PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN))
    // tested with mingw32, mingw64, msvc2008, cygwin
    NT_TIB *pTib = (NT_TIB *)NtCurrentTeb();
    void *stackBase = (void *)pTib->StackBase;
#elif PLATFORM(SOLARIS_OS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
    // NOTREACHED
    void *stackBase = nullptr;
#elif PLATFORM(UNIX)
    static void *stackBase = nullptr;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == nullptr || thread != stackThread) {
        pthread_attr_t sattr;
#if HAVE_PTHREAD_NP_H || defined(__NetBSD__)
        // e.g. on FreeBSD 5.4, [email protected]
        // also on NetBSD 3 and 4, [email protected]
        // HIGHLY RECOMMENDED by manpage to allocate storage, avoids
        // crashing in JS immediately on FreeBSD.
        pthread_attr_init(&sattr);
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif // picking the _np function to use --- Linux or BSD

        size_t stackSize;
        pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        stackBase = (char *)stackBase + stackSize; // a matter of interpretation, apparently...
        pthread_attr_destroy(&sattr);
        assert(stackBase);
        stackThread = thread;
    }
#else
#error Need a way to get the stack base on this platform
#endif
    return stackBase;
}

void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if defined(WTF_COMPILER_MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if defined(WTF_COMPILER_MSVC)
#pragma warning(pop)
#endif

    void *dummy;
    void *stackPointer = &dummy;
    void *stackBase = currentThreadStackBase();

    markStackObjectsConservatively(stackPointer, stackBase);
}

#if USE(MULTIPLE_THREADS)

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

void Collector::markOtherThreadConservatively(Thread *thread)
{
    thread_suspend(thread->machThread);

#if PLATFORM(X86)
    i386_thread_state_t regs;
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    x86_thread_state64_t regs;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    ppc_thread_state_t regs;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    ppc_thread_state64_t regs;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif
    // get the thread register state
    thread_get_state(thread->machThread, flavor, (thread_state_t)&regs, &user_count);

    // scan the registers
    markStackObjectsConservatively((void *)&regs, (void *)((char *)&regs + (user_count * sizeof(usword_t))));

    // scan the stack
#if PLATFORM(X86) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86)
    markStackObjectsConservatively((void *)regs.esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64)
    markStackObjectsConservatively((void *)regs.rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif (PLATFORM(PPC) || PLATFORM(PPC64)) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__r1, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    markStackObjectsConservatively((void *)regs.r1, pthread_get_stackaddr_np(thread->posixThread));
#else
#error Unknown Architecture
#endif

    thread_resume(thread->machThread);
}

#endif

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (thread->posixThread != pthread_self()) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}

typedef HashCountedSet<JSCell *> ProtectCounts;

static ProtectCounts &protectedValues()
{
    static ProtectCounts pv;
    return pv;
}

void Collector::protect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k)) {
        return;
    }

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k)) {
        return;
    }

    protectedValues().remove(k->asCell());
}

void Collector::markProtectedObjects()
{
    ProtectCounts &pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        JSCell *val = it->first;
        if (!val->marked()) {
            val->mark();
        }
    }
}

bool Collector::collect()
{
    assert(JSLock::lockCount() > 0);

#if USE(MULTIPLE_THREADS)
    bool currentThreadIsMainThread = pthread_main_np();
#else
    bool currentThreadIsMainThread = true;
#endif

    Interpreter::markSourceCachedObjects();

    if (Interpreter::s_hook) {
        Interpreter *scr = Interpreter::s_hook;
        do {
            scr->mark(currentThreadIsMainThread);
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread) {
        markMainThreadOnlyObjects();
    }
#endif

    // SWEEP: destroy everything that is still unmarked (garbage) and clear the mark on everything else

    // Note: if a cell has zeroIfFree == 0, it is either free,
    // or in the middle of being constructed and has not yet
    // had its vtable filled. Hence, such cells should not be cleaned up

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.blocks.used(); block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                CollectorCell *cell = curBlock->cells + i;
                JSCell *imp = reinterpret_cast<JSCell *>(cell);
                if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior to this point assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == nullptr) {
                        continue;
                    }
                    imp->~JSCell();
                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = nullptr;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
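            // usedCells counts live cells only; each time the loop below skips a
            // free cell (zeroIfFree == 0) it extends the scan limit by one, so at
            // least usedCells live cells get visited without always walking the
            // whole block. The single '&' ANDs the two bounds checks without a
            // short-circuit branch; both operands are plain comparisons, so the
            // result matches '&&'.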
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == nullptr) {
                    ++minimumCellsToProcess;
                } else {
                    JSCell *imp = reinterpret_cast<JSCell *>(cell);
                    if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                        imp->~JSCell();
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = nullptr;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->marked.clearAll();
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks.m_data[block] = heap.blocks[heap.blocks.used() - 1];
                heap.blocks.m_used--;
                block--; // Don't move forward a step in this case
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects) {
        heap.firstBlockWithPossibleSpace = 0;
    }

    // Now sweep oversize blocks.
    emptyBlocks = 0;
    for (size_t ob = 0; ob < heap.oversizeBlocks.used(); ++ob) {
        CollectorBlock *curBlock = heap.oversizeBlocks[ob];
        CollectorCell *freeList = curBlock->freeList;
        size_t usedCells = curBlock->usedCells;

        // Go through the cells
        for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
            if (i % 32 == 0 && curBlock->allocd.bits[i / 32] == 0) {
                // Nothing used around here, skip this and the next 31 cells
                i += 31;
                continue;
            }

            CollectorCell *cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == nullptr) {
                continue;
            }

            if (!curBlock->allocd.get(i)) {
                continue;
            }

            JSCell *imp = reinterpret_cast<JSCell *>(cell);

            // Live and trailer cells will all have the mark set,
            // so we only have to collect with it clear --
            // and we already took care of those that are
            // already free (or being initialized) above
            if (!curBlock->marked.get(i)) {
                // Free this cell...
                imp->~JSCell();
                --numLiveObjects;
                --usedCells;

                // Mark the head and the trailers as available
                cell->u.freeCell.zeroIfFree = nullptr;
                curBlock->allocd.clear(i);

                ++i; // Go to the potential trailer..
                while (i < CELLS_PER_BLOCK && curBlock->trailer.get(i)) {
                    --usedCells;
                    curBlock->allocd.clear(i);
                    curBlock->trailer.clear(i);
                    curBlock->marked.clear(i);
                    curBlock->cells[i].u.freeCell.zeroIfFree = nullptr;
                    ++i;
                }
                --i; // Last item we processed.
            } else {
                // If this is not a trailer cell, clear the mark
                if (!curBlock->trailer.get(i)) {
                    curBlock->marked.clear(i);
                }
            }
        } // each cell
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;
        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                freeBlock(curBlock);

                // swap with the last block so we compact as we go
                heap.oversizeBlocks.m_data[ob] = heap.oversizeBlocks[heap.oversizeBlocks.used() - 1];
                heap.oversizeBlocks.m_used--;
                ob--; // Don't move forward a step in this case
            }
        }
    } // each oversize block

    bool deleted = heap.numLiveObjects != numLiveObjects;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
    if (newMemoryFull && newMemoryFull != memoryFull) {
        reportOutOfMemoryToAllInterpreters();
    }
    memoryFull = newMemoryFull;

    return deleted;
}

size_t Collector::size()
{
    return heap.numLiveObjects;
}

#ifdef KJS_DEBUG_MEM
void Collector::finalCheck()
{
}
#endif

size_t Collector::numInterpreters()
{
    size_t count = 0;
    if (Interpreter::s_hook) {
        Interpreter *scr = Interpreter::s_hook;
        do {
            ++count;
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }
    return count;
}

size_t Collector::numProtectedObjects()
{
    return protectedValues().size();
}

static const char *typeName(JSCell *val)
{
    const char *name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}

HashCountedSet<const char *> *Collector::rootObjectTypeCounts()
{
    HashCountedSet<const char *> *counts = new HashCountedSet<const char *>;

    ProtectCounts &pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        counts->add(typeName(it->first));
    }

    return counts;
}

void Collector::reportOutOfMemoryToAllInterpreters()
{
    if (!Interpreter::s_hook) {
        return;
    }

    Interpreter *interpreter = Interpreter::s_hook;
    do {
        ExecState *exec = interpreter->execState();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while (interpreter != Interpreter::s_hook);
}

} // namespace KJS