// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


//
// #Overview
//
// GC automatically manages memory allocated by managed code.
// The design doc for GC can be found at Documentation/botr/garbage-collection.md
//
// This file includes both the code for GC and the allocator. The most common
// case for a GC to be triggered is from the allocator code. See
// code:#try_allocate_more_space where it calls GarbageCollectGeneration.
//
// Entry points for the allocator are GCHeap::Alloc* which are called by the
// allocation helpers in gcscan.cpp
//

#include "gcpriv.h"

#define USE_INTROSORT

// We just needed a simple random number generator for testing.
class gc_rand
{
public:
    static uint64_t x;

    static uint64_t get_rand()
    {
        x = (314159269*x+278281) & 0x7FFFFFFF;
        return x;
    }

    // obtain random number in the range 0 .. r-1
    static uint64_t get_rand(uint64_t r) {
        // require r >= 0
        uint64_t x = (uint64_t)((get_rand() * r) >> 31);
        return x;
    }
};
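
// Worked example (illustrative only): get_rand() yields a 31-bit value v in
// [0, 2^31), so (v * r) >> 31 scales it into the range [0, r). For instance,
// with v = 0x40000000 (the midpoint of the range) and r = 10:
//
//     (0x40000000 * 10) >> 31 == 5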

uint64_t gc_rand::x = 0;

#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
BOOL bgc_heap_walk_for_etw_p = FALSE;
#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE

#if defined(FEATURE_REDHAWK)
#define MAYBE_UNUSED_VAR(v) v = v
#else
#define MAYBE_UNUSED_VAR(v)
#endif // FEATURE_REDHAWK

#define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
#define commit_min_th (16*OS_PAGE_SIZE)

static size_t smoothed_desired_per_heap = 0;

#ifdef SERVER_GC
#define partial_size_th 100
#define num_partial_refs 64
#else //SERVER_GC
#define partial_size_th 100
#define num_partial_refs 32
#endif //SERVER_GC

#define demotion_plug_len_th (6*1024*1024)

#ifdef BIT64
#define MARK_STACK_INITIAL_LENGTH 1024
#else
#define MARK_STACK_INITIAL_LENGTH 128
#endif // BIT64

#define LOH_PIN_QUEUE_LENGTH 100
#define LOH_PIN_DECAY 10

uint32_t yp_spin_count_unit = 0;
size_t loh_size_threshold = LARGE_OBJECT_SIZE;

#ifdef GC_CONFIG_DRIVEN
int compact_ratio = 0;
#endif //GC_CONFIG_DRIVEN

// See comments in reset_memory.
BOOL reset_mm_p = TRUE;

bool g_fFinalizerRunOnShutDown = false;

#ifdef FEATURE_SVR_GC
bool g_built_with_svr_gc = true;
#else
bool g_built_with_svr_gc = false;
#endif // FEATURE_SVR_GC

#if defined(BUILDENV_DEBUG)
uint8_t g_build_variant = 0;
#elif defined(BUILDENV_CHECKED)
uint8_t g_build_variant = 1;
#else
uint8_t g_build_variant = 2;
#endif // defined(BUILDENV_DEBUG)

VOLATILE(int32_t) g_no_gc_lock = -1;

#if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
const char * const allocation_state_str[] = {
    "start",
    "can_allocate",
    "cant_allocate",
    "retry_allocate",
    "try_fit",
    "try_fit_new_seg",
    "try_fit_after_cg",
    "try_fit_after_bgc",
    "try_free_full_seg_in_bgc",
    "try_free_after_bgc",
    "try_seg_end",
    "acquire_seg",
    "acquire_seg_after_cg",
    "acquire_seg_after_bgc",
    "check_and_wait_for_bgc",
    "trigger_full_compact_gc",
    "trigger_ephemeral_gc",
    "trigger_2nd_ephemeral_gc",
    "check_retry_seg"
};

const char * const msl_take_state_str[] = {
    "get_large_seg",
    "bgc_loh_sweep",
    "wait_bgc",
    "block_gc",
    "clr_mem",
    "clr_large_mem",
    "t_eph_gc",
    "t_full_gc",
    "alloc_small",
    "alloc_large",
    "alloc_small_cant",
    "alloc_large_cant",
    "try_alloc",
    "try_budget"
};
#endif //TRACE_GC && !DACCESS_COMPILE


// Keep this in sync with the definition of gc_reason
#if (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)
static const char* const str_gc_reasons[] =
{
    "alloc_soh",
    "induced",
    "lowmem",
    "empty",
    "alloc_loh",
    "oos_soh",
    "oos_loh",
    "induced_noforce",
    "gcstress",
    "induced_lowmem",
    "induced_compacting",
    "lowmemory_host",
    "pm_full_gc",
    "lowmemory_host_blocking"
};

static const char* const str_gc_pause_modes[] =
{
    "batch",
    "interactive",
    "low_latency",
    "sustained_low_latency",
    "no_gc"
};
#endif // (defined(DT_LOG) || defined(TRACE_GC)) && !defined (DACCESS_COMPILE)

inline
BOOL is_induced (gc_reason reason)
{
    return ((reason == reason_induced) ||
            (reason == reason_induced_noforce) ||
            (reason == reason_lowmemory) ||
            (reason == reason_lowmemory_blocking) ||
            (reason == reason_induced_compacting) ||
            (reason == reason_lowmemory_host) ||
            (reason == reason_lowmemory_host_blocking));
}

inline
BOOL is_induced_blocking (gc_reason reason)
{
    return ((reason == reason_induced) ||
            (reason == reason_lowmemory_blocking) ||
            (reason == reason_induced_compacting) ||
            (reason == reason_lowmemory_host_blocking));
}

#ifndef DACCESS_COMPILE
int64_t qpf;
size_t start_time;

size_t GetHighPrecisionTimeStamp()
{
    int64_t ts = GCToOSInterface::QueryPerformanceCounter();

    return (size_t)(ts / (qpf / 1000));
}
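
// A note on units (assuming qpf caches GCToOSInterface::QueryPerformanceFrequency,
// i.e. ticks per second): qpf / 1000 is ticks per millisecond, so the value
// returned above is a timestamp in milliseconds.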

uint64_t RawGetHighPrecisionTimeStamp()
{
    return (uint64_t)GCToOSInterface::QueryPerformanceCounter();
}

#endif

#ifdef GC_STATS
// There is a current and a prior copy of the statistics.  This allows us to display deltas per reporting
// interval, as well as running totals.  The 'min' and 'max' values require special treatment.  They are
// Reset (zeroed) in the current statistics when we begin a new interval and they are updated via a
// comparison with the global min/max.
GCStatistics g_GCStatistics;
GCStatistics g_LastGCStatistics;

char* GCStatistics::logFileName = NULL;
FILE*  GCStatistics::logFile = NULL;

void GCStatistics::AddGCStats(const gc_mechanisms& settings, size_t timeInMSec)
{
#ifdef BACKGROUND_GC
    if (settings.concurrent)
    {
        bgc.Accumulate((uint32_t)timeInMSec*1000);
        cntBGC++;
    }
    else if (settings.background_p)
    {
        fgc.Accumulate((uint32_t)timeInMSec*1000);
        cntFGC++;
        if (settings.compaction)
            cntCompactFGC++;
        assert(settings.condemned_generation < max_generation);
        cntFGCGen[settings.condemned_generation]++;
    }
    else
#endif // BACKGROUND_GC
    {
        ngc.Accumulate((uint32_t)timeInMSec*1000);
        cntNGC++;
        if (settings.compaction)
            cntCompactNGC++;
        cntNGCGen[settings.condemned_generation]++;
    }

    if (is_induced (settings.reason))
        cntReasons[(int)reason_induced]++;
    else if (settings.stress_induced)
        cntReasons[(int)reason_gcstress]++;
    else
        cntReasons[(int)settings.reason]++;

#ifdef BACKGROUND_GC
    if (settings.concurrent || !settings.background_p)
    {
#endif // BACKGROUND_GC
        RollOverIfNeeded();
#ifdef BACKGROUND_GC
    }
#endif // BACKGROUND_GC
}

void GCStatistics::Initialize()
{
    LIMITED_METHOD_CONTRACT;
    // for efficiency's sake we're taking a dependency on the layout of a C++ object
    // with a vtable. protect against violations of our premise:
    static_assert(offsetof(GCStatistics, cntDisplay) == sizeof(void*),
            "The first field of GCStatistics follows the pointer sized vtable");

    int podOffs = offsetof(GCStatistics, cntDisplay);       // offset of the first POD field
    memset((uint8_t*)(&g_GCStatistics)+podOffs, 0, sizeof(g_GCStatistics)-podOffs);
    memset((uint8_t*)(&g_LastGCStatistics)+podOffs, 0, sizeof(g_LastGCStatistics)-podOffs);
}

void GCStatistics::DisplayAndUpdate()
{
    LIMITED_METHOD_CONTRACT;

    if (logFileName == NULL || logFile == NULL)
        return;

    {
        if (cntDisplay == 0)
            fprintf(logFile, "\nGCMix **** Initialize *****\n\n");

        fprintf(logFile, "GCMix **** Summary ***** %d\n", cntDisplay);

        // NGC summary (total, timing info)
        ngc.DisplayAndUpdate(logFile, "NGC ", &g_LastGCStatistics.ngc, cntNGC, g_LastGCStatistics.cntNGC, msec);

        // FGC summary (total, timing info)
        fgc.DisplayAndUpdate(logFile, "FGC ", &g_LastGCStatistics.fgc, cntFGC, g_LastGCStatistics.cntFGC, msec);

        // BGC summary
        bgc.DisplayAndUpdate(logFile, "BGC ", &g_LastGCStatistics.bgc, cntBGC, g_LastGCStatistics.cntBGC, msec);

        // NGC/FGC break out by generation & compacting vs. sweeping
        fprintf(logFile, "NGC   ");
        for (int i = max_generation; i >= 0; --i)
            fprintf(logFile, "gen%d %d (%d). ", i, cntNGCGen[i]-g_LastGCStatistics.cntNGCGen[i], cntNGCGen[i]);
        fprintf(logFile, "\n");

        fprintf(logFile, "FGC   ");
        for (int i = max_generation-1; i >= 0; --i)
            fprintf(logFile, "gen%d %d (%d). ", i, cntFGCGen[i]-g_LastGCStatistics.cntFGCGen[i], cntFGCGen[i]);
        fprintf(logFile, "\n");

        // Compacting vs. Sweeping break out
        int _cntSweep = cntNGC-cntCompactNGC;
        int _cntLastSweep = g_LastGCStatistics.cntNGC-g_LastGCStatistics.cntCompactNGC;
        fprintf(logFile, "NGC   Sweeping %d (%d) Compacting %d (%d)\n",
               _cntSweep - _cntLastSweep, _cntSweep,
               cntCompactNGC - g_LastGCStatistics.cntCompactNGC, cntCompactNGC);

        _cntSweep = cntFGC-cntCompactFGC;
        _cntLastSweep = g_LastGCStatistics.cntFGC-g_LastGCStatistics.cntCompactFGC;
        fprintf(logFile, "FGC   Sweeping %d (%d) Compacting %d (%d)\n",
               _cntSweep - _cntLastSweep, _cntSweep,
               cntCompactFGC - g_LastGCStatistics.cntCompactFGC, cntCompactFGC);

#ifdef TRACE_GC
        // GC reasons...
        for (int reason=(int)reason_alloc_soh; reason <= (int)reason_gcstress; ++reason)
        {
            if (cntReasons[reason] != 0)
                fprintf(logFile, "%s %d (%d). ", str_gc_reasons[reason],
                    cntReasons[reason]-g_LastGCStatistics.cntReasons[reason], cntReasons[reason]);
        }
#endif // TRACE_GC
        fprintf(logFile, "\n\n");

        // flush the log file...
        fflush(logFile);
    }

    g_LastGCStatistics = *this;

    ngc.Reset();
    fgc.Reset();
    bgc.Reset();
}

#endif // GC_STATS

#ifdef BGC_SERVO_TUNING
bool gc_heap::bgc_tuning::enable_fl_tuning = false;
uint32_t gc_heap::bgc_tuning::memory_load_goal = 0;
uint32_t gc_heap::bgc_tuning::memory_load_goal_slack = 0;
uint64_t gc_heap::bgc_tuning::available_memory_goal = 0;
bool gc_heap::bgc_tuning::panic_activated_p = false;
double gc_heap::bgc_tuning::accu_error_panic = 0.0;
double gc_heap::bgc_tuning::above_goal_kp = 0.0;
double gc_heap::bgc_tuning::above_goal_ki = 0.0;
bool gc_heap::bgc_tuning::enable_kd = false;
bool gc_heap::bgc_tuning::enable_ki = false;
bool gc_heap::bgc_tuning::enable_smooth = false;
bool gc_heap::bgc_tuning::enable_tbh = false;
bool gc_heap::bgc_tuning::enable_ff = false;
bool gc_heap::bgc_tuning::enable_gradual_d = false;
double gc_heap::bgc_tuning::above_goal_kd = 0.0;
double gc_heap::bgc_tuning::above_goal_ff = 0.0;
double gc_heap::bgc_tuning::num_gen1s_smooth_factor = 0.0;
double gc_heap::bgc_tuning::ml_kp = 0.0;
double gc_heap::bgc_tuning::ml_ki = 0.0;
double gc_heap::bgc_tuning::accu_error = 0.0;

bool gc_heap::bgc_tuning::fl_tuning_triggered = false;

size_t gc_heap::bgc_tuning::num_bgcs_since_tuning_trigger = 0;

bool gc_heap::bgc_tuning::next_bgc_p = false;

size_t gc_heap::bgc_tuning::gen1_index_last_bgc_end;
size_t gc_heap::bgc_tuning::gen1_index_last_bgc_start;
size_t gc_heap::bgc_tuning::gen1_index_last_bgc_sweep;
size_t gc_heap::bgc_tuning::actual_num_gen1s_to_trigger;

gc_heap::bgc_tuning::tuning_calculation gc_heap::bgc_tuning::gen_calc[2];
gc_heap::bgc_tuning::tuning_stats gc_heap::bgc_tuning::gen_stats[2];
gc_heap::bgc_tuning::bgc_size_data gc_heap::bgc_tuning::current_bgc_end_data[2];

size_t gc_heap::bgc_tuning::last_stepping_bgc_count = 0;
uint32_t gc_heap::bgc_tuning::last_stepping_mem_load = 0;
uint32_t gc_heap::bgc_tuning::stepping_interval = 0;
bool gc_heap::bgc_tuning::use_stepping_trigger_p = true;
double gc_heap::bgc_tuning::gen2_ratio_correction = 0.0;
double gc_heap::bgc_tuning::ratio_correction_step = 0.0;

int gc_heap::saved_bgc_tuning_reason = -1;
#endif //BGC_SERVO_TUNING

inline
size_t round_up_power2 (size_t size)
{
    // Get the 0-based index of the most-significant bit in size-1.
    // If the call failed (because size-1 is zero), size must be 1,
    // so return 1 (because 1 rounds up to itself).
    DWORD highest_set_bit_index;
    if (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, size - 1)) { return 1; }

    // The size == 0 case (which would have overflowed to SIZE_MAX when decremented)
    // is handled below by relying on the fact that highest_set_bit_index is the maximum value
    // (31 or 63, depending on sizeof(size_t)) and left-shifting a value >= 2 by that
    // number of bits shifts in zeros from the right, resulting in an output of zero.
    return static_cast<size_t>(2) << highest_set_bit_index;
}

inline
size_t round_down_power2 (size_t size)
{
    // Get the 0-based index of the most-significant bit in size.
    // If the call failed, size must be zero so return zero.
    DWORD highest_set_bit_index;
    if (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, size)) { return 0; }

    // Left-shift 1 by highest_set_bit_index to get back a value containing only
    // the most-significant set bit of size, i.e. size rounded down
    // to the next power-of-two value.
    return static_cast<size_t>(1) << highest_set_bit_index;
}
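
// A few illustrative values for the two helpers above (documentation only, not
// exercised anywhere):
//
//     round_up_power2 (1) == 1      round_down_power2 (1) == 1
//     round_up_power2 (5) == 8      round_down_power2 (5) == 4
//     round_up_power2 (8) == 8      round_down_power2 (8) == 8
//     round_up_power2 (0) == 0      round_down_power2 (0) == 0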

// Get the 0-based index of the most-significant bit in the value.
// Returns -1 if the input value is zero (i.e. has no set bits).
inline
int index_of_highest_set_bit (size_t value)
{
    // Get the 0-based index of the most-significant bit in the value.
    // If the call failed (because value is zero), return -1.
    DWORD highest_set_bit_index;
    return (0 ==
#ifdef BIT64
        BitScanReverse64(
#else
        BitScanReverse(
#endif
            &highest_set_bit_index, value)) ? -1 : static_cast<int>(highest_set_bit_index);
}

inline
int relative_index_power2_plug (size_t power2)
{
    int index = index_of_highest_set_bit (power2);
    assert (index <= MAX_INDEX_POWER2);

    return ((index < MIN_INDEX_POWER2) ? 0 : (index - MIN_INDEX_POWER2));
}

inline
int relative_index_power2_free_space (size_t power2)
{
    int index = index_of_highest_set_bit (power2);
    assert (index <= MAX_INDEX_POWER2);

    return ((index < MIN_INDEX_POWER2) ? -1 : (index - MIN_INDEX_POWER2));
}

#ifdef BACKGROUND_GC
uint32_t bgc_alloc_spin_count = 140;
uint32_t bgc_alloc_spin_count_loh = 16;
uint32_t bgc_alloc_spin = 2;


inline
void c_write (uint32_t& place, uint32_t value)
{
    Interlocked::Exchange (&place, value);
    //place = value;
}

#ifndef DACCESS_COMPILE

// If every heap's gen2 or gen3 size is less than this threshold we will do a blocking GC.
const size_t bgc_min_per_heap = 4*1024*1024;

int gc_heap::gchist_index = 0;
gc_mechanisms_store gc_heap::gchist[max_history_count];

#ifndef MULTIPLE_HEAPS
size_t gc_heap::total_promoted_bytes = 0;
VOLATILE(bgc_state) gc_heap::current_bgc_state = bgc_not_in_process;
int gc_heap::gchist_index_per_heap = 0;
gc_heap::gc_history gc_heap::gchist_per_heap[max_history_count];
#endif //MULTIPLE_HEAPS

void gc_heap::add_to_history_per_heap()
{
#ifdef GC_HISTORY
    gc_history* current_hist = &gchist_per_heap[gchist_index_per_heap];
    current_hist->gc_index = settings.gc_index;
    current_hist->current_bgc_state = current_bgc_state;
    size_t elapsed = dd_gc_elapsed_time (dynamic_data_of (0));
    current_hist->gc_time_ms = (uint32_t)elapsed;
    current_hist->gc_efficiency = (elapsed ? (total_promoted_bytes / elapsed) : total_promoted_bytes);
    current_hist->eph_low = generation_allocation_start (generation_of (max_generation-1));
    current_hist->gen0_start = generation_allocation_start (generation_of (0));
    current_hist->eph_high = heap_segment_allocated (ephemeral_heap_segment);
#ifdef BACKGROUND_GC
    current_hist->bgc_lowest = background_saved_lowest_address;
    current_hist->bgc_highest = background_saved_highest_address;
#endif //BACKGROUND_GC
    current_hist->fgc_lowest = lowest_address;
    current_hist->fgc_highest = highest_address;
    current_hist->g_lowest = g_gc_lowest_address;
    current_hist->g_highest = g_gc_highest_address;

    gchist_index_per_heap++;
    if (gchist_index_per_heap == max_history_count)
    {
        gchist_index_per_heap = 0;
    }
#endif //GC_HISTORY
}

void gc_heap::add_to_history()
{
#ifdef GC_HISTORY
    gc_mechanisms_store* current_settings = &gchist[gchist_index];
    current_settings->store (&settings);

    gchist_index++;
    if (gchist_index == max_history_count)
    {
        gchist_index = 0;
    }
#endif //GC_HISTORY
}

#endif //DACCESS_COMPILE
#endif //BACKGROUND_GC

#if defined(TRACE_GC) && !defined(DACCESS_COMPILE)
BOOL   gc_log_on = TRUE;
FILE* gc_log = NULL;
size_t gc_log_file_size = 0;

size_t gc_buffer_index = 0;
size_t max_gc_buffers = 0;

static CLRCriticalSection gc_log_lock;

// we keep this much in a buffer and only flush when the buffer is full
#define gc_log_buffer_size (1024*1024)
uint8_t* gc_log_buffer = 0;
size_t gc_log_buffer_offset = 0;
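
// log_va_msg below appends each formatted message to gc_log_buffer and only hits
// the file when the buffer is nearly full: the current buffer index is stamped at
// the end, the whole buffer is written out and then reset. Once gc_buffer_index
// exceeds max_gc_buffers the file position is rewound to the start, so the log
// file behaves like a ring of fixed-size chunks holding the most recent output.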

void log_va_msg(const char *fmt, va_list args)
{
    gc_log_lock.Enter();

    const int BUFFERSIZE = 512;
    static char rgchBuffer[BUFFERSIZE];
    char *  pBuffer  = &rgchBuffer[0];

    pBuffer[0] = '\n';
    int buffer_start = 1;
    int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging());
    buffer_start += pid_len;
    memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
    int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args);
    if (msg_len == -1)
    {
        msg_len = BUFFERSIZE - buffer_start;
    }

    msg_len += buffer_start;

    if ((gc_log_buffer_offset + msg_len) > (gc_log_buffer_size - 12))
    {
        char index_str[8];
        memset (index_str, '-', 8);
        sprintf_s (index_str, _countof(index_str), "%d", (int)gc_buffer_index);
        gc_log_buffer[gc_log_buffer_offset] = '\n';
        memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8);

        gc_buffer_index++;
        if (gc_buffer_index > max_gc_buffers)
        {
            fseek (gc_log, 0, SEEK_SET);
            gc_buffer_index = 0;
        }
        fwrite(gc_log_buffer, gc_log_buffer_size, 1, gc_log);
        fflush(gc_log);
        memset (gc_log_buffer, '*', gc_log_buffer_size);
        gc_log_buffer_offset = 0;
    }

    memcpy (gc_log_buffer + gc_log_buffer_offset, pBuffer, msg_len);
    gc_log_buffer_offset += msg_len;

    gc_log_lock.Leave();
}

void GCLog (const char *fmt, ... )
{
    if (gc_log_on && (gc_log != NULL))
    {
        va_list     args;
        va_start(args, fmt);
        log_va_msg (fmt, args);
        va_end(args);
    }
}
#endif // TRACE_GC && !DACCESS_COMPILE

#if defined(GC_CONFIG_DRIVEN) && !defined(DACCESS_COMPILE)

BOOL   gc_config_log_on = FALSE;
FILE* gc_config_log = NULL;

// we keep this much in a buffer and only flush when the buffer is full
#define gc_config_log_buffer_size (1*1024) // TEMP
uint8_t* gc_config_log_buffer = 0;
size_t gc_config_log_buffer_offset = 0;

// For the config log, since we log so little, we keep the whole history. Also it's
// only ever logged to by one thread so there's no need to synchronize.
void log_va_msg_config(const char *fmt, va_list args)
{
    const int BUFFERSIZE = 256;
    static char rgchBuffer[BUFFERSIZE];
    char *  pBuffer  = &rgchBuffer[0];

    pBuffer[0] = '\n';
    int buffer_start = 1;
    int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
    assert (msg_len != -1);
    msg_len += buffer_start;

    if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
    {
        fwrite(gc_config_log_buffer, gc_config_log_buffer_offset, 1, gc_config_log);
        fflush(gc_config_log);
        gc_config_log_buffer_offset = 0;
    }

    memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len);
    gc_config_log_buffer_offset += msg_len;
}

void GCLogConfig (const char *fmt, ... )
{
    if (gc_config_log_on && (gc_config_log != NULL))
    {
        va_list     args;
        va_start(args, fmt);
        log_va_msg_config (fmt, args);
        va_end(args);
    }
}
#endif // GC_CONFIG_DRIVEN && !DACCESS_COMPILE

#ifdef SYNCHRONIZATION_STATS

// Number of GCs we have done since we last logged.
static unsigned int         gc_count_during_log;
// In ms. This is how often we print out stats.
static const unsigned int   log_interval = 5000;
// Time (in ms) when we start a new log interval.
static unsigned int         log_start_tick;
static unsigned int         gc_lock_contended;
static int64_t              log_start_hires;
// Cycles accumulated in SuspendEE during log_interval.
static uint64_t             suspend_ee_during_log;
// Cycles accumulated in RestartEE during log_interval.
static uint64_t             restart_ee_during_log;
static uint64_t             gc_during_log;

#endif //SYNCHRONIZATION_STATS

void
init_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS
    if (gc_count_during_log == 0)
    {
        gc_heap::init_sync_stats();
        suspend_ee_during_log = 0;
        restart_ee_during_log = 0;
        gc_during_log = 0;
        gc_lock_contended = 0;

        log_start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
        log_start_hires = GCToOSInterface::QueryPerformanceCounter();
    }
    gc_count_during_log++;
#endif //SYNCHRONIZATION_STATS
}

void
process_sync_log_stats()
{
#ifdef SYNCHRONIZATION_STATS

    unsigned int log_elapsed = GCToOSInterface::GetLowPrecisionTimeStamp() - log_start_tick;

    if (log_elapsed > log_interval)
    {
        uint64_t total = GCToOSInterface::QueryPerformanceCounter() - log_start_hires;
        // Print out the cycles we spent on average in each suspend and restart.
        printf("\n_________________________________________________________________________________\n"
            "Past %d(s): #%3d GCs; Total gc_lock contended: %8u; GC: %12u\n"
            "SuspendEE: %8u; RestartEE: %8u GC %.3f%%\n",
            log_interval / 1000,
            gc_count_during_log,
            gc_lock_contended,
            (unsigned int)(gc_during_log / gc_count_during_log),
            (unsigned int)(suspend_ee_during_log / gc_count_during_log),
            (unsigned int)(restart_ee_during_log / gc_count_during_log),
            (double)(100.0f * gc_during_log / total));
        gc_heap::print_sync_stats(gc_count_during_log);

        gc_count_during_log = 0;
    }
#endif //SYNCHRONIZATION_STATS
}

#ifdef MULTIPLE_HEAPS
#ifndef DACCESS_COMPILE
uint32_t g_num_active_processors = 0;

enum gc_join_stage
{
    gc_join_init_cpu_mapping = 0,
    gc_join_done = 1,
    gc_join_generation_determined = 2,
    gc_join_begin_mark_phase = 3,
    gc_join_scan_dependent_handles = 4,
    gc_join_rescan_dependent_handles = 5,
    gc_join_scan_sizedref_done = 6,
    gc_join_null_dead_short_weak = 7,
    gc_join_scan_finalization = 8,
    gc_join_null_dead_long_weak = 9,
    gc_join_null_dead_syncblk = 10,
    gc_join_decide_on_compaction = 11,
    gc_join_rearrange_segs_compaction = 12,
    gc_join_adjust_handle_age_compact = 13,
    gc_join_adjust_handle_age_sweep = 14,
    gc_join_begin_relocate_phase = 15,
    gc_join_relocate_phase_done = 16,
    gc_join_verify_objects_done = 17,
    gc_join_start_bgc = 18,
    gc_join_restart_ee = 19,
    gc_join_concurrent_overflow = 20,
    gc_join_suspend_ee = 21,
    gc_join_bgc_after_ephemeral = 22,
    gc_join_allow_fgc = 23,
    gc_join_bgc_sweep = 24,
    gc_join_suspend_ee_verify = 25,
    gc_join_restart_ee_verify = 26,
    gc_join_set_state_free = 27,
    gc_r_join_update_card_bundle = 28,
    gc_join_after_absorb = 29,
    gc_join_verify_copy_table = 30,
    gc_join_after_reset = 31,
    gc_join_after_ephemeral_sweep = 32,
    gc_join_after_profiler_heap_walk = 33,
    gc_join_minimal_gc = 34,
    gc_join_after_commit_soh_no_gc = 35,
    gc_join_expand_loh_no_gc = 36,
    gc_join_final_no_gc = 37,
    gc_join_disable_software_write_watch = 38,
    gc_join_max = 39
};

enum gc_join_flavor
{
    join_flavor_server_gc = 0,
    join_flavor_bgc = 1
};

#define first_thread_arrived 2
#pragma warning(push)
#pragma warning(disable:4324) // don't complain if DECLSPEC_ALIGN actually pads
struct DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE) join_structure
{
    // Shared, non-volatile; keep on a separate cache line to prevent eviction
    int n_threads;

    // Keep polling/wait structures on a separate cache line; written once per join
    DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
    GCEvent joined_event[3]; // the last event in the array is only used for first_thread_arrived.
    Volatile<int> lock_color;
    VOLATILE(BOOL) wait_done;
    VOLATILE(BOOL) joined_p;

    // Keep volatile counted locks on a separate cache line; written many times per join
    DECLSPEC_ALIGN(HS_CACHE_LINE_SIZE)
    VOLATILE(int) join_lock;
    VOLATILE(int) r_join_lock;

};
#pragma warning(pop)

enum join_type
{
    type_last_join = 0,
    type_join = 1,
    type_restart = 2,
    type_first_r_join = 3,
    type_r_join = 4
};

enum join_time
{
    time_start = 0,
    time_end = 1
};

enum join_heap_index
{
    join_heap_restart = 100,
    join_heap_r_restart = 200
};

struct join_event
{
    uint32_t heap;
    join_time time;
    join_type type;
};

class t_join
{
    join_structure join_struct;

    int id;
    gc_join_flavor flavor;

#ifdef JOIN_STATS
    uint64_t start[MAX_SUPPORTED_CPUS], end[MAX_SUPPORTED_CPUS], start_seq;
    // remember join id and last thread to arrive so restart can use these
    int thd;
    // we want to print statistics every 10 seconds - this is to remember the start of the 10 sec interval
    uint32_t start_tick;
    // counters for joins, in 1000's of clock cycles
    uint64_t elapsed_total[gc_join_max], wake_total[gc_join_max], seq_loss_total[gc_join_max], par_loss_total[gc_join_max], in_join_total[gc_join_max];
#endif //JOIN_STATS

public:
    BOOL init (int n_th, gc_join_flavor f)
    {
        dprintf (JOIN_LOG, ("Initializing join structure"));
        join_struct.n_threads = n_th;
        join_struct.lock_color = 0;
        for (int i = 0; i < 3; i++)
        {
            if (!join_struct.joined_event[i].IsValid())
            {
                join_struct.joined_p = FALSE;
                dprintf (JOIN_LOG, ("Creating join event %d", i));
                // TODO - changing this to a non OS event
                // because this is also used by BGC threads which are
                // managed threads and WaitEx does not allow you to wait
                // for an OS event on a managed thread.
                // But we are not sure if this plays well in the hosting
                // environment.
                //join_struct.joined_event[i].CreateOSManualEventNoThrow(FALSE);
                if (!join_struct.joined_event[i].CreateManualEventNoThrow(FALSE))
                    return FALSE;
            }
        }
        join_struct.join_lock = join_struct.n_threads;
        join_struct.r_join_lock = join_struct.n_threads;
        join_struct.wait_done = FALSE;
        flavor = f;

#ifdef JOIN_STATS
        start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //JOIN_STATS

        return TRUE;
    }

    void destroy ()
    {
        dprintf (JOIN_LOG, ("Destroying join structure"));
        for (int i = 0; i < 3; i++)
        {
            if (join_struct.joined_event[i].IsValid())
                join_struct.joined_event[i].CloseEvent();
        }
    }

    inline void fire_event (int heap, join_time time, join_type type, int join_id)
    {
        FIRE_EVENT(GCJoin_V2, heap, time, type, join_id);
    }

    void join (gc_heap* gch, int join_id)
    {
#ifdef JOIN_STATS
        // parallel execution ends here
        end[gch->heap_number] = get_ts();
#endif //JOIN_STATS

        assert (!join_struct.joined_p);
        int color = join_struct.lock_color.LoadWithoutBarrier();

        if (Interlocked::Decrement(&join_struct.join_lock) != 0)
        {
            dprintf (JOIN_LOG, ("join%d(%d): Join() Waiting...join_lock is now %d",
                flavor, join_id, (int32_t)(join_struct.join_lock)));

            fire_event (gch->heap_number, time_start, type_join, join_id);

            //busy wait around the color
            if (color == join_struct.lock_color.LoadWithoutBarrier())
            {
respin:
                int spin_count = 128 * yp_spin_count_unit;
                for (int j = 0; j < spin_count; j++)
                {
                    if (color != join_struct.lock_color.LoadWithoutBarrier())
                    {
                        break;
                    }
                    YieldProcessor();           // indicate to the processor that we are spinning
                }

                // we've spun, and if color still hasn't changed, fall into hard wait
                if (color == join_struct.lock_color.LoadWithoutBarrier())
                {
                    dprintf (JOIN_LOG, ("join%d(%d): Join() hard wait on reset event %d, join_lock is now %d",
                        flavor, join_id, color, (int32_t)(join_struct.join_lock)));

                    //Thread* current_thread = GCToEEInterface::GetThread();
                    //BOOL cooperative_mode = gc_heap::enable_preemptive ();
                    uint32_t dwJoinWait = join_struct.joined_event[color].Wait(INFINITE, FALSE);
                    //gc_heap::disable_preemptive (cooperative_mode);

                    if (dwJoinWait != WAIT_OBJECT_0)
                    {
                        STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
                        FATAL_GC_ERROR ();
                    }
                }

                // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
                if (color == join_struct.lock_color.LoadWithoutBarrier())
                {
                    goto respin;
                }

                dprintf (JOIN_LOG, ("join%d(%d): Join() done, join_lock is %d",
                    flavor, join_id, (int32_t)(join_struct.join_lock)));
            }

            fire_event (gch->heap_number, time_end, type_join, join_id);

#ifdef JOIN_STATS
            // parallel execution starts here
            start[gch->heap_number] = get_ts();
            Interlocked::ExchangeAdd(&in_join_total[join_id], (start[gch->heap_number] - end[gch->heap_number]));
#endif //JOIN_STATS
        }
        else
        {
            fire_event (gch->heap_number, time_start, type_last_join, join_id);

            join_struct.joined_p = TRUE;
            dprintf (JOIN_LOG, ("join%d(%d): Last thread to complete the join, setting id", flavor, join_id));
            join_struct.joined_event[!color].Reset();
            id = join_id;
            // this one is alone so it can proceed
#ifdef JOIN_STATS
            // remember the join id, the last thread arriving, the start of the sequential phase,
            // and keep track of the cycles spent waiting in the join
            thd = gch->heap_number;
            start_seq = get_ts();
            Interlocked::ExchangeAdd(&in_join_total[join_id], (start_seq - end[gch->heap_number]));
#endif //JOIN_STATS
        }
    }

    // Reverse join - the first thread to get here does the work; other threads will
    // only proceed after the work is done.
    // Note that you cannot call this twice in a row on the same thread. Plus there's no
    // need to call it twice in a row - you should just merge the work.
    BOOL r_join (gc_heap* gch, int join_id)
    {

        if (join_struct.n_threads == 1)
        {
            return TRUE;
        }

        if (Interlocked::CompareExchange(&join_struct.r_join_lock, 0, join_struct.n_threads) == 0)
        {
            if (!join_struct.wait_done)
            {
                dprintf (JOIN_LOG, ("r_join() Waiting..."));

                fire_event (gch->heap_number, time_start, type_join, join_id);

                //busy wait on wait_done
                if (!join_struct.wait_done)
                {
        respin:
                    int spin_count = 256 * yp_spin_count_unit;
                    for (int j = 0; j < spin_count; j++)
                    {
                        if (join_struct.wait_done)
                        {
                            break;
                        }
                        YieldProcessor();           // indicate to the processor that we are spinning
                    }

                    // we've spun, and if wait_done still hasn't been set, fall into hard wait
                    if (!join_struct.wait_done)
                    {
                        dprintf (JOIN_LOG, ("Join() hard wait on reset event %d", first_thread_arrived));
                        uint32_t dwJoinWait = join_struct.joined_event[first_thread_arrived].Wait(INFINITE, FALSE);
                        if (dwJoinWait != WAIT_OBJECT_0)
                        {
                            STRESS_LOG1 (LF_GC, LL_FATALERROR, "joined event wait failed with code: %Ix", dwJoinWait);
                            FATAL_GC_ERROR ();
                        }
                    }

                    // avoid race due to the thread about to reset the event (occasionally) being preempted before ResetEvent()
                    if (!join_struct.wait_done)
                    {
                        goto respin;
                    }

                    dprintf (JOIN_LOG, ("r_join() done"));
                }

                fire_event (gch->heap_number, time_end, type_join, join_id);
            }

            return FALSE;
        }
        else
        {
            fire_event (gch->heap_number, time_start, type_first_r_join, join_id);
            return TRUE;
        }
    }
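
    // Typical r_join usage (sketch; the join id below is just an example): the
    // thread that gets TRUE does the work once, then releases everyone else.
    //
    //     if (gc_t_join.r_join (this, gc_r_join_update_card_bundle))
    //     {
    //         // ... work done by exactly one thread ...
    //         gc_t_join.r_restart();
    //     }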

#ifdef JOIN_STATS
    uint64_t get_ts()
    {
        return GCToOSInterface::QueryPerformanceCounter();
    }

    void start_ts (gc_heap* gch)
    {
        // parallel execution ends here
        start[gch->heap_number] = get_ts();
    }
#endif //JOIN_STATS

    void restart()
    {
#ifdef JOIN_STATS
        uint64_t elapsed_seq = get_ts() - start_seq;
        uint64_t max = 0, sum = 0, wake = 0;
        uint64_t min_ts = start[0];
        for (int i = 1; i < join_struct.n_threads; i++)
        {
            if(min_ts > start[i]) min_ts = start[i];
        }

        for (int i = 0; i < join_struct.n_threads; i++)
        {
            uint64_t wake_delay = start[i] - min_ts;
            uint64_t elapsed = end[i] - start[i];
            if (max < elapsed)
                max = elapsed;
            sum += elapsed;
            wake += wake_delay;
        }
        uint64_t seq_loss = (join_struct.n_threads - 1)*elapsed_seq;
        uint64_t par_loss = join_struct.n_threads*max - sum;
        double efficiency = 0.0;
        if (max > 0)
            efficiency = sum*100.0/(join_struct.n_threads*max);

        const double ts_scale = 1e-6;

        // enable this printf to get statistics on each individual join as it occurs
//      printf("join #%3d  seq_loss = %5g   par_loss = %5g  efficiency = %3.0f%%\n", join_id, ts_scale*seq_loss, ts_scale*par_loss, efficiency);

        elapsed_total[id] += sum;
        wake_total[id] += wake;
        seq_loss_total[id] += seq_loss;
        par_loss_total[id] += par_loss;

        // every 10 seconds, print a summary of the time spent in each type of join
        if (GCToOSInterface::GetLowPrecisionTimeStamp() - start_tick > 10*1000)
        {
            printf("**** summary *****\n");
            for (int i = 0; i < 16; i++)
            {
                printf("join #%3d  elapsed_total = %8g wake_loss = %8g seq_loss = %8g  par_loss = %8g  in_join_total = %8g\n",
                   i,
                   ts_scale*elapsed_total[i],
                   ts_scale*wake_total[i],
                   ts_scale*seq_loss_total[i],
                   ts_scale*par_loss_total[i],
                   ts_scale*in_join_total[i]);
                elapsed_total[i] = wake_total[i] = seq_loss_total[i] = par_loss_total[i] = in_join_total[i] = 0;
            }
            start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
        }
#endif //JOIN_STATS

        fire_event (join_heap_restart, time_start, type_restart, -1);
        assert (join_struct.joined_p);
        join_struct.joined_p = FALSE;
        join_struct.join_lock = join_struct.n_threads;
        dprintf (JOIN_LOG, ("join%d(%d): Restarting from join: join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
//        printf("restart from join #%d at cycle %u from start of gc\n", join_id, GetCycleCount32() - gc_start);
        int color = join_struct.lock_color.LoadWithoutBarrier();
        join_struct.lock_color = !color;
        join_struct.joined_event[color].Set();

//        printf("Set joined_event %d\n", !join_struct.lock_color);

        fire_event (join_heap_restart, time_end, type_restart, -1);

#ifdef JOIN_STATS
        start[thd] = get_ts();
#endif //JOIN_STATS
    }

    BOOL joined()
    {
        dprintf (JOIN_LOG, ("join%d(%d): joined, join_lock is %d", flavor, id, (int32_t)(join_struct.join_lock)));
        return join_struct.joined_p;
    }

    void r_restart()
    {
        if (join_struct.n_threads != 1)
        {
            fire_event (join_heap_r_restart, time_start, type_restart, -1);
            join_struct.wait_done = TRUE;
            join_struct.joined_event[first_thread_arrived].Set();
            fire_event (join_heap_r_restart, time_end, type_restart, -1);
        }
    }

    void r_init()
    {
        if (join_struct.n_threads != 1)
        {
            join_struct.r_join_lock = join_struct.n_threads;
            join_struct.wait_done = FALSE;
            join_struct.joined_event[first_thread_arrived].Reset();
        }
    }
};
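
// Typical join usage by the per-heap GC threads (sketch; hp is a gc_heap* and the
// join id is just an example): every heap's thread calls join(); the last one to
// arrive sees joined() return TRUE, performs the serial work, and then calls
// restart() to release the other threads.
//
//     gc_t_join.join (hp, gc_join_decide_on_compaction);
//     if (gc_t_join.joined())
//     {
//         // ... serial work done by exactly one thread ...
//         gc_t_join.restart();
//     }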

t_join gc_t_join;

#ifdef BACKGROUND_GC
t_join bgc_t_join;
#endif //BACKGROUND_GC

#endif // DACCESS_COMPILE

#endif //MULTIPLE_HEAPS

#define spin_and_switch(count_to_spin, expr) \
{ \
    for (int j = 0; j < count_to_spin; j++) \
    { \
        if (expr) \
        { \
            break;\
        } \
        YieldProcessor(); \
    } \
    if (!(expr)) \
    { \
        GCToOSInterface::YieldThread(0); \
    } \
}
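
// spin_and_switch spins for up to count_to_spin iterations waiting for expr to
// become true, then yields the thread once if it still hasn't; callers typically
// loop back and re-test, e.g. (pattern used by exclusive_sync below):
//
//     spin_and_switch (spin_count, (needs_checking == 0));
//     goto retry;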

#ifndef DACCESS_COMPILE
#ifdef BACKGROUND_GC

#define max_pending_allocs 64

class exclusive_sync
{
    // TODO - verify that this is the right syntax for Volatile.
    VOLATILE(uint8_t*) rwp_object;
    VOLATILE(int32_t) needs_checking;

    int spin_count;

    uint8_t cache_separator[HS_CACHE_LINE_SIZE - sizeof (int) - sizeof (int32_t)];

    // TODO - perhaps each object should be on its own cache line...
    VOLATILE(uint8_t*) alloc_objects[max_pending_allocs];

    int find_free_index ()
    {
        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] == (uint8_t*)0)
            {
                return i;
            }
        }

        return -1;
    }

public:
    void init()
    {
        spin_count = 32 * (g_num_processors - 1);
        rwp_object = 0;
        needs_checking = 0;
        for (int i = 0; i < max_pending_allocs; i++)
        {
            alloc_objects [i] = (uint8_t*)0;
        }
    }

    void check()
    {
        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] != (uint8_t*)0)
            {
                GCToOSInterface::DebugBreak();
            }
        }
    }

    void bgc_mark_set (uint8_t* obj)
    {
        dprintf (3, ("cm: probing %Ix", obj));
retry:
        if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
        {
            // If we spend too much time scanning all the allocs,
            // consider adding a high water mark and scan up
            // to that; we'll need to interlock in done when
            // we update the high watermark.
            for (int i = 0; i < max_pending_allocs; i++)
            {
                if (obj == alloc_objects[i])
                {
                    needs_checking = 0;
                    dprintf (3, ("cm: will spin"));
                    spin_and_switch (spin_count, (obj != alloc_objects[i]));
                    goto retry;
                }
            }

            rwp_object = obj;
            needs_checking = 0;
            dprintf (3, ("cm: set %Ix", obj));
            return;
        }
        else
        {
            spin_and_switch (spin_count, (needs_checking == 0));
            goto retry;
        }
    }

    int loh_alloc_set (uint8_t* obj)
    {
        if (!gc_heap::cm_in_progress)
        {
            return -1;
        }

retry:
        dprintf (3, ("loh alloc: probing %Ix", obj));

        if (Interlocked::CompareExchange(&needs_checking, 1, 0) == 0)
        {
            if (obj == rwp_object)
            {
                needs_checking = 0;
                spin_and_switch (spin_count, (obj != rwp_object));
                goto retry;
            }
            else
            {
                int cookie = find_free_index();

                if (cookie != -1)
                {
                    alloc_objects[cookie] = obj;
                    needs_checking = 0;
                    //if (cookie >= 4)
                    //{
                    //    GCToOSInterface::DebugBreak();
                    //}

                    dprintf (3, ("loh alloc: set %Ix at %d", obj, cookie));
                    return cookie;
                }
                else
                {
                    needs_checking = 0;
                    dprintf (3, ("loh alloc: setting %Ix will spin to acquire a free index", obj));
                    spin_and_switch (spin_count, (find_free_index () != -1));
                    goto retry;
                }
            }
        }
        else
        {
            dprintf (3, ("loh alloc: will spin on checking %Ix", obj));
            spin_and_switch (spin_count, (needs_checking == 0));
            goto retry;
        }
    }

    void bgc_mark_done ()
    {
        dprintf (3, ("cm: release lock on %Ix", (uint8_t *)rwp_object));
        rwp_object = 0;
    }

    void loh_alloc_done_with_index (int index)
    {
        dprintf (3, ("loh alloc: release lock on %Ix based on %d", (uint8_t *)alloc_objects[index], index));
        assert ((index >= 0) && (index < max_pending_allocs));
        alloc_objects[index] = (uint8_t*)0;
    }

    void loh_alloc_done (uint8_t* obj)
    {
#ifdef BACKGROUND_GC
        if (!gc_heap::cm_in_progress)
        {
            return;
        }

        for (int i = 0; i < max_pending_allocs; i++)
        {
            if (alloc_objects [i] == obj)
            {
                dprintf (3, ("loh alloc: release lock on %Ix at %d", (uint8_t *)alloc_objects[i], i));
                alloc_objects[i] = (uint8_t*)0;
                return;
            }
        }
#endif //BACKGROUND_GC
    }
};
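
// Usage sketch for exclusive_sync (illustrative; the real call sites live in the
// background mark and LOH allocation paths, typically through the gc_heap's
// bgc_alloc_lock instance):
//
//     // background GC thread, before reading through a large object:
//     bgc_alloc_lock->bgc_mark_set (obj);
//     // ... mark through obj ...
//     bgc_alloc_lock->bgc_mark_done ();
//
//     // allocating thread, before publishing a newly allocated LOH object:
//     int cookie = bgc_alloc_lock->loh_alloc_set (obj); // -1 when no BGC mark is in progress
//     // ... clear the memory / install the method table ...
//     if (cookie != -1)
//         bgc_alloc_lock->loh_alloc_done_with_index (cookie);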

// Note that this class was written assuming just synchronization between
// one background GC thread and multiple user threads that might request
// an FGC - it does not take into account what kind of locks the multiple
// user threads might be holding at the time (eg, there could only be one
// user thread requesting an FGC because it needs to take gc_lock first)
// so you'll see checks that may not be necessary if you take those conditions
// into consideration.
//
// With the introduction of Server Background GC we no longer use this
// class to do synchronization between FGCs and the BGC.
class recursive_gc_sync
{
    static VOLATILE(int32_t) foreground_request_count;//initial state 0
    static VOLATILE(BOOL) gc_background_running; //initial state FALSE
    static VOLATILE(int32_t) foreground_count; // initial state 0;
    static VOLATILE(uint32_t) foreground_gate; // initial state FALSE;
    static GCEvent foreground_complete;//Auto Reset
    static GCEvent foreground_allowed;//Manual Reset
public:
    static void begin_background();
    static void end_background();
    static void begin_foreground();
    static void end_foreground();
    BOOL allow_foreground ();
    static BOOL init();
    static void shutdown();
    static BOOL background_running_p() {return gc_background_running;}
};
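
// Usage sketch (illustrative only; as the comment above notes, this class is no
// longer used for FGC/BGC synchronization when Server Background GC is in use):
//
//     // background GC thread:
//     recursive_gc_sync::begin_background();
//     // ... concurrent work, periodically calling allow_foreground() at points
//     // where a foreground GC may safely run ...
//     recursive_gc_sync::end_background();
//
//     // user thread that needs a blocking (foreground) GC while a BGC is running:
//     recursive_gc_sync::begin_foreground();
//     // ... run the ephemeral GC ...
//     recursive_gc_sync::end_foreground();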

VOLATILE(int32_t) recursive_gc_sync::foreground_request_count = 0;//initial state 0
VOLATILE(int32_t) recursive_gc_sync::foreground_count = 0; // initial state 0;
VOLATILE(BOOL) recursive_gc_sync::gc_background_running = FALSE; //initial state FALSE
VOLATILE(uint32_t) recursive_gc_sync::foreground_gate = 0;
GCEvent recursive_gc_sync::foreground_complete;//Auto Reset
GCEvent recursive_gc_sync::foreground_allowed;//Manual Reset

BOOL recursive_gc_sync::init ()
{
    foreground_request_count = 0;
    foreground_count = 0;
    gc_background_running = FALSE;
    foreground_gate = 0;

    if (!foreground_complete.CreateOSAutoEventNoThrow(FALSE))
    {
        goto error;
    }
    if (!foreground_allowed.CreateManualEventNoThrow(FALSE))
    {
        goto error;
    }
    return TRUE;

error:
    shutdown();
    return FALSE;

}

void recursive_gc_sync::shutdown()
{
    if (foreground_complete.IsValid())
        foreground_complete.CloseEvent();
    if (foreground_allowed.IsValid())
        foreground_allowed.CloseEvent();
}

void recursive_gc_sync::begin_background()
{
    dprintf (2, ("begin background"));
    foreground_request_count = 1;
    foreground_count = 1;
    foreground_allowed.Reset();
    gc_background_running = TRUE;
}
void recursive_gc_sync::end_background()
{
    dprintf (2, ("end background"));
    gc_background_running = FALSE;
    foreground_gate = 1;
    foreground_allowed.Set();
}

void recursive_gc_sync::begin_foreground()
{
    dprintf (2, ("begin_foreground"));

    bool cooperative_mode = false;
    if (gc_background_running)
    {
        gc_heap::fire_alloc_wait_event_begin (awr_fgc_wait_for_bgc);
        gc_heap::alloc_wait_event_p = TRUE;

try_again_top:

        Interlocked::Increment (&foreground_request_count);

try_again_no_inc:
        dprintf(2, ("Waiting sync gc point"));
        assert (foreground_allowed.IsValid());
        assert (foreground_complete.IsValid());

        cooperative_mode = gc_heap::enable_preemptive ();

        foreground_allowed.Wait(INFINITE, FALSE);

        dprintf(2, ("Waiting sync gc point is done"));

        gc_heap::disable_preemptive (cooperative_mode);

        if (foreground_gate)
        {
            Interlocked::Increment (&foreground_count);
            dprintf (2, ("foreground_count: %d", (int32_t)foreground_count));
            if (foreground_gate)
            {
                gc_heap::settings.concurrent = FALSE;
                return;
            }
            else
            {
                end_foreground();
                goto try_again_top;
            }
        }
        else
        {
            goto try_again_no_inc;
        }
    }
}

void recursive_gc_sync::end_foreground()
{
    dprintf (2, ("end_foreground"));
    if (gc_background_running)
    {
        Interlocked::Decrement (&foreground_request_count);
        dprintf (2, ("foreground_count before decrement: %d", (int32_t)foreground_count));
        if (Interlocked::Decrement (&foreground_count) == 0)
        {
            //c_write ((BOOL*)&foreground_gate, 0);
            // TODO - couldn't make the syntax work with Volatile<T>
            foreground_gate = 0;
            if (foreground_count == 0)
            {
                foreground_allowed.Reset ();
                dprintf(2, ("setting foreground complete event"));
                foreground_complete.Set();
            }
        }
    }
}

inline
BOOL recursive_gc_sync::allow_foreground()
{
    assert (gc_heap::settings.concurrent);
    dprintf (100, ("enter allow_foreground, f_req_count: %d, f_count: %d",
                   (int32_t)foreground_request_count, (int32_t)foreground_count));

    BOOL did_fgc = FALSE;

    //if we have suspended the EE, just return because
    //some thread could be waiting on this to proceed.
    if (!GCHeap::GcInProgress)
    {
        //TODO BACKGROUND_GC This is to stress the concurrency between
        //background and foreground
//        gc_heap::disallow_new_allocation (0);

        //GCToOSInterface::YieldThread(0);

        //END of TODO
        if (foreground_request_count != 0)
        {
            //foreground wants to run
            //save the important settings
            //TODO BACKGROUND_GC be more selective about the important settings.
            gc_mechanisms saved_settings = gc_heap::settings;
            do
            {
                did_fgc = TRUE;
                //c_write ((BOOL*)&foreground_gate, 1);
                // TODO - couldn't make the syntax work with Volatile<T>
                foreground_gate = 1;
                foreground_allowed.Set ();
                foreground_complete.Wait (INFINITE, FALSE);
            }while (/*foreground_request_count ||*/ foreground_gate);

            assert (!foreground_gate);

            //restore the important settings
            gc_heap::settings = saved_settings;
            GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
            //the background GC shouldn't be using gc_high and gc_low
            //gc_low = lowest_address;
            //gc_high = highest_address;
        }

        //TODO BACKGROUND_GC This is to stress the concurrency between
        //background and foreground
//        gc_heap::allow_new_allocation (0);
        //END of TODO
    }

    dprintf (100, ("leave allow_foreground"));
    assert (gc_heap::settings.concurrent);
    return did_fgc;
}

#endif //BACKGROUND_GC
#endif //DACCESS_COMPILE


#if  defined(COUNT_CYCLES)
#ifdef _MSC_VER
#pragma warning(disable:4035)
#endif //_MSC_VER

static
unsigned        GetCycleCount32()        // enough for about 40 seconds
{
__asm   push    EDX
__asm   _emit   0x0F
__asm   _emit   0x31
__asm   pop     EDX
};

#pragma warning(default:4035)

#endif //COUNT_CYCLES

#ifdef TIME_GC
int mark_time, plan_time, sweep_time, reloc_time, compact_time;
#endif //TIME_GC

#ifndef MULTIPLE_HEAPS

#endif // MULTIPLE_HEAPS

void reset_memory (uint8_t* o, size_t sizeo);

#ifdef WRITE_WATCH

#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
static bool virtual_alloc_hardware_write_watch = false;
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

static bool hardware_write_watch_capability = false;

#ifndef DACCESS_COMPILE

//check if the write watch APIs are supported.

void hardware_write_watch_api_supported()
{
    if (GCToOSInterface::SupportsWriteWatch())
    {
        hardware_write_watch_capability = true;
        dprintf (2, ("WriteWatch supported"));
    }
    else
    {
        dprintf (2,("WriteWatch not supported"));
    }
}

#endif //!DACCESS_COMPILE

inline bool can_use_hardware_write_watch()
{
    return hardware_write_watch_capability;
}

inline bool can_use_write_watch_for_gc_heap()
{
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    return true;
#else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    return can_use_hardware_write_watch();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
}

inline bool can_use_write_watch_for_card_table()
{
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    return true;
#else
    return can_use_hardware_write_watch();
#endif
}

#else
#define mem_reserve (MEM_RESERVE)
#endif //WRITE_WATCH

//check if the low memory notification is supported

#ifndef DACCESS_COMPILE

void WaitLongerNoInstru (int i)
{
    // every 8th attempt:
    bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();

    // if we're waiting for gc to finish, we should block immediately
    if (g_fSuspensionPending == 0)
    {
        if  (g_num_processors > 1)
        {
            YieldProcessor();           // indicate to the processor that we are spinning
            if  (i & 0x01f)
                GCToOSInterface::YieldThread (0);
            else
                GCToOSInterface::Sleep (5);
        }
        else
            GCToOSInterface::Sleep (5);
    }

    // If CLR is hosted, a thread may reach here while it is in preemptive GC mode,
    // or it has no Thread object, in order to force a task to yield, or to trigger a GC.
    // It is important that the thread is going to wait for GC.  Otherwise the thread
    // is in a tight loop.  If the thread has high priority, the perf is going to be very BAD.
    if (bToggleGC)
    {
#ifdef _DEBUG
        // In debug builds, all enter_spin_lock operations go through this code.  If a GC has
        // started, it is important to block until the GC thread calls set_gc_done (since it is
        // guaranteed to have cleared g_TrapReturningThreads by this point).  This avoids livelock
        // conditions which can otherwise occur if threads are allowed to spin in this function
        // (and therefore starve the GC thread) between the point when the GC thread sets the
        // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads.
        if (gc_heap::gc_started)
        {
            gc_heap::wait_for_gc_done();
        }
#endif // _DEBUG
        GCToEEInterface::DisablePreemptiveGC();
    }
    else if (g_fSuspensionPending > 0)
    {
        g_theGCHeap->WaitUntilGCComplete();
    }
}

inline
static void safe_switch_to_thread()
{
    bool cooperative_mode = gc_heap::enable_preemptive();

    GCToOSInterface::YieldThread(0);

    gc_heap::disable_preemptive(cooperative_mode);
}

//
// We need the following methods to have volatile arguments, so that they can accept
// raw pointers in addition to the results of the & operator on Volatile<T>.
//
inline
static void enter_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
retry:

    if (Interlocked::CompareExchange(lock, 0, -1) >= 0)
    {
        unsigned int i = 0;
        while (VolatileLoad(lock) >= 0)
        {
            if ((++i & 7) && !IsGCInProgress())
            {
                if  (g_num_processors > 1)
                {
#ifndef MULTIPLE_HEAPS
                    int spin_count = 32 * yp_spin_count_unit;
#else //!MULTIPLE_HEAPS
                    int spin_count = yp_spin_count_unit;
#endif //!MULTIPLE_HEAPS
                    for (int j = 0; j < spin_count; j++)
                    {
                        if  (VolatileLoad(lock) < 0 || IsGCInProgress())
                            break;
                        YieldProcessor();           // indicate to the processor that we are spinning
                    }
                    if  (VolatileLoad(lock) >= 0 && !IsGCInProgress())
                    {
                        safe_switch_to_thread();
                    }
                }
                else
                {
                    safe_switch_to_thread();
                }
            }
            else
            {
                WaitLongerNoInstru(i);
            }
        }
        goto retry;
    }
}

inline
static BOOL try_enter_spin_lock_noinstru(RAW_KEYWORD(volatile) int32_t* lock)
{
    return (Interlocked::CompareExchange(&*lock, 0, -1) < 0);
}

inline
static void leave_spin_lock_noinstru (RAW_KEYWORD(volatile) int32_t* lock)
{
    VolatileStore<int32_t>((int32_t*)lock, -1);
}

#ifdef _DEBUG

inline
static void enter_spin_lock(GCSpinLock *pSpinLock)
{
    enter_spin_lock_noinstru(&pSpinLock->lock);
    assert (pSpinLock->holding_thread == (Thread*)-1);
    pSpinLock->holding_thread = GCToEEInterface::GetThread();
}

inline
static BOOL try_enter_spin_lock(GCSpinLock *pSpinLock)
{
    BOOL ret = try_enter_spin_lock_noinstru(&pSpinLock->lock);
    if (ret)
        pSpinLock->holding_thread = GCToEEInterface::GetThread();
    return ret;
}

inline
static void leave_spin_lock(GCSpinLock *pSpinLock)
{
    bool gc_thread_p = GCToEEInterface::WasCurrentThreadCreatedByGC();
//    _ASSERTE((pSpinLock->holding_thread == GCToEEInterface::GetThread()) || gc_thread_p || pSpinLock->released_by_gc_p);
    pSpinLock->released_by_gc_p = gc_thread_p;
    pSpinLock->holding_thread = (Thread*) -1;
    if (pSpinLock->lock != -1)
        leave_spin_lock_noinstru(&pSpinLock->lock);
}

#define ASSERT_HOLDING_SPIN_LOCK(pSpinLock) \
    _ASSERTE((pSpinLock)->holding_thread == GCToEEInterface::GetThread());

#define ASSERT_NOT_HOLDING_SPIN_LOCK(pSpinLock) \
    _ASSERTE((pSpinLock)->holding_thread != GCToEEInterface::GetThread());

#else //_DEBUG

//In the concurrent version, the Enable/DisablePreemptiveGC is optional because
//the gc thread calls WaitLonger.
void WaitLonger (int i
#ifdef SYNCHRONIZATION_STATS
    , GCSpinLock* spin_lock
#endif //SYNCHRONIZATION_STATS
    )
{
#ifdef SYNCHRONIZATION_STATS
    (spin_lock->num_wait_longer)++;
#endif //SYNCHRONIZATION_STATS

    // every 8th attempt:
    bool bToggleGC = GCToEEInterface::EnablePreemptiveGC();
    assert (bToggleGC);

    // if we're waiting for gc to finish, we should block immediately
    if (!gc_heap::gc_started)
    {
#ifdef SYNCHRONIZATION_STATS
        (spin_lock->num_switch_thread_w)++;
#endif //SYNCHRONIZATION_STATS
        if  (g_num_processors > 1)
        {
            YieldProcessor();           // indicate to the processor that we are spinning
            if  (i & 0x01f)
                GCToOSInterface::YieldThread (0);
            else
                GCToOSInterface::Sleep (5);
        }
        else
            GCToOSInterface::Sleep (5);
    }

    // If the CLR is hosted, a thread may reach here while it is in preemptive GC mode,
    // or while it has no Thread object, in order to force a task to yield or to trigger a GC.
    // It is important that the thread actually waits for the GC to finish; otherwise it sits
    // in a tight loop, and if the thread has high priority, performance will be very bad.
    if (gc_heap::gc_started)
    {
        gc_heap::wait_for_gc_done();
    }

    if (bToggleGC)
    {
#ifdef SYNCHRONIZATION_STATS
        (spin_lock->num_disable_preemptive_w)++;
#endif //SYNCHRONIZATION_STATS
        GCToEEInterface::DisablePreemptiveGC();
    }
}
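
// The lock below backs off in stages: spin with YieldProcessor for a bounded count,
// then yield the thread; every 8th iteration, or once a GC has started, it falls back
// to WaitLonger above, which may sleep or wait for the GC to finish instead of
// burning CPU.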

inline
static void enter_spin_lock (GCSpinLock* spin_lock)
{
retry:

    if (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) >= 0)
    {
        unsigned int i = 0;
        while (spin_lock->lock >= 0)
        {
            if ((++i & 7) && !gc_heap::gc_started)
            {
                if  (g_num_processors > 1)
                {
#ifndef MULTIPLE_HEAPS
                    int spin_count = 32 * yp_spin_count_unit;
#else //!MULTIPLE_HEAPS
                    int spin_count = yp_spin_count_unit;
#endif //!MULTIPLE_HEAPS
                    for (int j = 0; j < spin_count; j++)
                    {
                        if  (spin_lock->lock < 0 || gc_heap::gc_started)
                            break;
                        YieldProcessor();           // indicate to the processor that we are spinning
                    }
                    if  (spin_lock->lock >= 0 && !gc_heap::gc_started)
                    {
#ifdef SYNCHRONIZATION_STATS
                        (spin_lock->num_switch_thread)++;
#endif //SYNCHRONIZATION_STATS
                        bool cooperative_mode = gc_heap::enable_preemptive ();

                        GCToOSInterface::YieldThread(0);

                        gc_heap::disable_preemptive (cooperative_mode);
                    }
                }
                else
                    GCToOSInterface::YieldThread(0);
            }
            else
            {
                WaitLonger(i
#ifdef SYNCHRONIZATION_STATS
                        , spin_lock
#endif //SYNCHRONIZATION_STATS
                    );
            }
        }
        goto retry;
    }
}

inline BOOL try_enter_spin_lock(GCSpinLock* spin_lock)
{
    return (Interlocked::CompareExchange(&spin_lock->lock, 0, -1) < 0);
}

inline
static void leave_spin_lock (GCSpinLock * spin_lock)
{
    spin_lock->lock = -1;
}

#define ASSERT_HOLDING_SPIN_LOCK(pSpinLock)

#endif //_DEBUG

bool gc_heap::enable_preemptive ()
{
    return GCToEEInterface::EnablePreemptiveGC();
}

void gc_heap::disable_preemptive (bool restore_cooperative)
{
    if (restore_cooperative)
    {
        GCToEEInterface::DisablePreemptiveGC();
    }
}

#endif // !DACCESS_COMPILE

typedef void **  PTR_PTR;
// This function clears a piece of memory.
// size has to be pointer-size aligned.

inline
void memclr ( uint8_t* mem, size_t size)
{
    dprintf (3, ("MEMCLR: %Ix, %d", mem, size));
    assert ((size & (sizeof(PTR_PTR)-1)) == 0);
    assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);

#if 0
    // The compiler will recognize this pattern and replace it with memset call. We can as well just call
    // memset directly to make it obvious what's going on.
    PTR_PTR m = (PTR_PTR) mem;
    for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
        *(m++) = 0;
#endif

    memset (mem, 0, size);
}

void memcopy (uint8_t* dmem, uint8_t* smem, size_t size)
{
    const size_t sz4ptr = sizeof(PTR_PTR)*4;
    const size_t sz2ptr = sizeof(PTR_PTR)*2;
    const size_t sz1ptr = sizeof(PTR_PTR)*1;

    // size must be a multiple of the pointer size
    assert ((size & (sizeof (PTR_PTR)-1)) == 0);
    assert (sizeof(PTR_PTR) == DATA_ALIGNMENT);

    // copy in groups of four pointer sized things at a time
    if (size >= sz4ptr)
    {
        do
        {
            ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
            ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
            ((PTR_PTR)dmem)[2] = ((PTR_PTR)smem)[2];
            ((PTR_PTR)dmem)[3] = ((PTR_PTR)smem)[3];
            dmem += sz4ptr;
            smem += sz4ptr;
        }
        while ((size -= sz4ptr) >= sz4ptr);
    }

    // still two pointer sized things or more left to copy?
    if (size & sz2ptr)
    {
        ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
        ((PTR_PTR)dmem)[1] = ((PTR_PTR)smem)[1];
        dmem += sz2ptr;
        smem += sz2ptr;
    }

    // still one pointer sized thing left to copy?
    if (size & sz1ptr)
    {
        ((PTR_PTR)dmem)[0] = ((PTR_PTR)smem)[0];
        // dmem += sz1ptr;
        // smem += sz1ptr;
    }

}
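
// For illustration: copying 7 pointer-sized words does one iteration of the unrolled
// 4-word loop, then the 2-word tail, then the 1-word tail (4 + 2 + 1 = 7).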

inline
ptrdiff_t round_down (ptrdiff_t add, int pitch)
{
    return ((add / pitch) * pitch);
}

#if defined(FEATURE_STRUCTALIGN) && defined(RESPECT_LARGE_ALIGNMENT)
// FEATURE_STRUCTALIGN allows the compiler to dictate the alignment,
// i.e., if a larger alignment matters or is beneficial, the compiler
// generated info tells us so.  RESPECT_LARGE_ALIGNMENT is just the
// converse - it's a heuristic for the GC to use a larger alignment.
#error FEATURE_STRUCTALIGN should imply !RESPECT_LARGE_ALIGNMENT
#endif

#if defined(FEATURE_STRUCTALIGN) && defined(FEATURE_LOH_COMPACTION)
#error FEATURE_STRUCTALIGN and FEATURE_LOH_COMPACTION are mutually exclusive
#endif

#if defined(GROWABLE_SEG_MAPPING_TABLE) && !defined(SEG_MAPPING_TABLE)
#error if GROWABLE_SEG_MAPPING_TABLE is defined, SEG_MAPPING_TABLE must be defined
#endif

inline
BOOL same_large_alignment_p (uint8_t* p1, uint8_t* p2)
{
#ifdef RESPECT_LARGE_ALIGNMENT
    return ((((size_t)p1 ^ (size_t)p2) & 7) == 0);
#else
    UNREFERENCED_PARAMETER(p1);
    UNREFERENCED_PARAMETER(p2);
    return TRUE;
#endif //RESPECT_LARGE_ALIGNMENT
}
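
// For illustration (RESPECT_LARGE_ALIGNMENT case): two pointers have the "same large
// alignment" when their low 3 bits agree, e.g. 0x1008 and 0x2010 do, while 0x1008 and
// 0x2014 do not.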

inline
size_t switch_alignment_size (BOOL already_padded_p)
{
    if (already_padded_p)
        return DATA_ALIGNMENT;
    else
        return (Align (min_obj_size) +((Align (min_obj_size)&DATA_ALIGNMENT)^DATA_ALIGNMENT));
}


#ifdef FEATURE_STRUCTALIGN
void set_node_aligninfo (uint8_t *node, int requiredAlignment, ptrdiff_t pad);
void clear_node_aligninfo (uint8_t *node);
#else // FEATURE_STRUCTALIGN
#define node_realigned(node)    (((plug_and_reloc*)(node))[-1].reloc & 1)
void set_node_realigned (uint8_t* node);
void clear_node_realigned(uint8_t* node);
#endif // FEATURE_STRUCTALIGN

inline
size_t AlignQword (size_t nbytes)
{
#ifdef FEATURE_STRUCTALIGN
    // This function is used to align everything on the large object
    // heap to an 8-byte boundary, to reduce the number of unaligned
    // accesses to (say) arrays of doubles.  With FEATURE_STRUCTALIGN,
    // the compiler dictates the optimal alignment instead of having
    // a heuristic in the GC.
    return Align (nbytes);
#else // FEATURE_STRUCTALIGN
    return (nbytes + 7) & ~7;
#endif // FEATURE_STRUCTALIGN
}
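
// For illustration (non-FEATURE_STRUCTALIGN path): AlignQword (13) == 16 and
// AlignQword (16) == 16 - sizes are rounded up to the next 8-byte boundary.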

inline
BOOL Aligned (size_t n)
{
    return (n & ALIGNCONST) == 0;
}

#define OBJECT_ALIGNMENT_OFFSET (sizeof(MethodTable *))

#ifdef FEATURE_STRUCTALIGN
#define MAX_STRUCTALIGN OS_PAGE_SIZE
#else // FEATURE_STRUCTALIGN
#define MAX_STRUCTALIGN 0
#endif // FEATURE_STRUCTALIGN

#ifdef FEATURE_STRUCTALIGN
inline
ptrdiff_t AdjustmentForMinPadSize(ptrdiff_t pad, int requiredAlignment)
{
    // The resulting alignpad must be either 0 or at least min_obj_size.
    // Note that by computing the following difference on unsigned types,
    // we can do the range check 0 < alignpad < min_obj_size with a
    // single conditional branch.
    if ((size_t)(pad - DATA_ALIGNMENT) < Align (min_obj_size) - DATA_ALIGNMENT)
    {
        return requiredAlignment;
    }
    return 0;
}
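
// Worked example of the unsigned range-check trick above, assuming (for illustration
// only) DATA_ALIGNMENT == 8 and Align (min_obj_size) == 24:
//   pad == 0  -> (size_t)(0 - 8) wraps to a huge value, the check fails, return 0;
//   pad == 8  -> 0 < 16, the check passes, requiredAlignment is returned and
//                StructAlign adds it to the result, growing the pad;
//   pad == 24 -> 16 is not < 16, the check fails, return 0.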

inline
uint8_t* StructAlign (uint8_t* origPtr, int requiredAlignment, ptrdiff_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
{
    // required alignment must be a power of two
    _ASSERTE(((size_t)origPtr & ALIGNCONST) == 0);
    _ASSERTE(((requiredAlignment - 1) & requiredAlignment) == 0);
    _ASSERTE(requiredAlignment >= sizeof(void *));
    _ASSERTE(requiredAlignment <= MAX_STRUCTALIGN);

    // When this method is invoked for individual objects (i.e., alignmentOffset
    // is just the size of the PostHeader), what needs to be aligned when
    // we're done is the pointer to the payload of the object (which means
    // the actual resulting object pointer is typically not aligned).

    uint8_t* result = (uint8_t*)Align ((size_t)origPtr + alignmentOffset, requiredAlignment-1) - alignmentOffset;
    ptrdiff_t alignpad = result - origPtr;

    return result + AdjustmentForMinPadSize (alignpad, requiredAlignment);
}

inline
ptrdiff_t ComputeStructAlignPad (uint8_t* plug, int requiredAlignment, size_t alignmentOffset=OBJECT_ALIGNMENT_OFFSET)
{
    return StructAlign (plug, requiredAlignment, alignmentOffset) - plug;
}

BOOL IsStructAligned (uint8_t *ptr, int requiredAlignment)
{
    return StructAlign (ptr, requiredAlignment) == ptr;
}

inline
ptrdiff_t ComputeMaxStructAlignPad (int requiredAlignment)
{
    if (requiredAlignment == DATA_ALIGNMENT)
        return 0;
    // Since a non-zero alignment padding cannot be less than min_obj_size (so we can fit the
    // alignment padding object), the worst-case alignment padding is correspondingly larger
    // than the required alignment.
    return requiredAlignment + Align (min_obj_size) - DATA_ALIGNMENT;
}

inline
ptrdiff_t ComputeMaxStructAlignPadLarge (int requiredAlignment)
{
    if (requiredAlignment <= get_alignment_constant (TRUE)+1)
        return 0;
    // This is the same as ComputeMaxStructAlignPad, except that in addition to leaving space
    // for padding before the actual object, it also leaves space for filling a gap after the
    // actual object.  This is needed on the large object heap, as the outer allocation functions
    // don't operate on an allocation context (which would have left space for the final gap).
    return requiredAlignment + Align (min_obj_size) * 2 - DATA_ALIGNMENT;
}

uint8_t* gc_heap::pad_for_alignment (uint8_t* newAlloc, int requiredAlignment, size_t size, alloc_context* acontext)
{
    uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
    if (alignedPtr != newAlloc) {
        make_unused_array (newAlloc, alignedPtr - newAlloc);
    }
    acontext->alloc_ptr = alignedPtr + Align (size);
    return alignedPtr;
}

uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size)
{
    uint8_t* alignedPtr = StructAlign (newAlloc, requiredAlignment);
    if (alignedPtr != newAlloc) {
        make_unused_array (newAlloc, alignedPtr - newAlloc);
    }
    if (alignedPtr < newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment)) {
        make_unused_array (alignedPtr + AlignQword (size), newAlloc + ComputeMaxStructAlignPadLarge (requiredAlignment) - alignedPtr);
    }
    return alignedPtr;
}
#else // FEATURE_STRUCTALIGN
#define ComputeMaxStructAlignPad(requiredAlignment) 0
#define ComputeMaxStructAlignPadLarge(requiredAlignment) 0
#endif // FEATURE_STRUCTALIGN

// CLR_SIZE is the maximum number of bytes from gen0 that is cleared (set to 0) in one chunk
#ifdef SERVER_GC
#define CLR_SIZE ((size_t)(8*1024))
#else //SERVER_GC
#define CLR_SIZE ((size_t)(8*1024))
#endif //SERVER_GC

#define END_SPACE_AFTER_GC (loh_size_threshold + MAX_STRUCTALIGN)

#ifdef BACKGROUND_GC
#define SEGMENT_INITIAL_COMMIT (2*OS_PAGE_SIZE)
#else
#define SEGMENT_INITIAL_COMMIT (OS_PAGE_SIZE)
#endif //BACKGROUND_GC

// This is always a power of 2.
const size_t min_segment_size_hard_limit = 1024*1024*16;

inline
size_t align_on_segment_hard_limit (size_t add)
{
    return ((size_t)(add + (min_segment_size_hard_limit - 1)) & ~(min_segment_size_hard_limit - 1));
}
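
// For illustration: since min_segment_size_hard_limit is 16MB,
// align_on_segment_hard_limit (1) and align_on_segment_hard_limit (16*1024*1024)
// both return 16MB; sizes round up to the next 16MB boundary.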

#ifdef SERVER_GC

#ifdef BIT64

#define INITIAL_ALLOC ((size_t)((size_t)4*1024*1024*1024))
#define LHEAP_ALLOC   ((size_t)(1024*1024*256))

#else

#define INITIAL_ALLOC ((size_t)(1024*1024*64))
#define LHEAP_ALLOC   ((size_t)(1024*1024*32))

#endif  // BIT64

#else //SERVER_GC

#ifdef BIT64

#define INITIAL_ALLOC ((size_t)(1024*1024*256))
#define LHEAP_ALLOC   ((size_t)(1024*1024*128))

#else

#define INITIAL_ALLOC ((size_t)(1024*1024*16))
#define LHEAP_ALLOC   ((size_t)(1024*1024*16))

#endif  // BIT64

#endif //SERVER_GC

//amount in bytes of the etw allocation tick
const size_t etw_allocation_tick = 100*1024;

const size_t low_latency_alloc = 256*1024;

const size_t fgn_check_quantum = 2*1024*1024;

#ifdef MH_SC_MARK
const int max_snoop_level = 128;
#endif //MH_SC_MARK


#ifdef CARD_BUNDLE
//threshold of heap size to turn on card bundles.
#define SH_TH_CARD_BUNDLE  (40*1024*1024)
#define MH_TH_CARD_BUNDLE  (180*1024*1024)
#endif //CARD_BUNDLE

#define GC_EPHEMERAL_DECOMMIT_TIMEOUT 5000

inline
size_t align_on_page (size_t add)
{
    return ((add + OS_PAGE_SIZE - 1) & ~((size_t)OS_PAGE_SIZE - 1));
}

inline
uint8_t* align_on_page (uint8_t* add)
{
    return (uint8_t*)align_on_page ((size_t) add);
}

inline
size_t align_lower_page (size_t add)
{
    return (add & ~((size_t)OS_PAGE_SIZE - 1));
}

inline
uint8_t* align_lower_page (uint8_t* add)
{
    return (uint8_t*)align_lower_page ((size_t)add);
}
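
// For illustration, assuming a 4KB OS_PAGE_SIZE: align_on_page (4097) == 8192 and
// align_lower_page (4097) == 4096.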

inline
size_t align_write_watch_lower_page (size_t add)
{
    return (add & ~(WRITE_WATCH_UNIT_SIZE - 1));
}

inline
uint8_t* align_write_watch_lower_page (uint8_t* add)
{
    return (uint8_t*)align_write_watch_lower_page ((size_t)add);
}


inline
BOOL power_of_two_p (size_t integer)
{
    return !(integer & (integer-1));
}
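
// Note: power_of_two_p (0) also returns TRUE, since 0 & (0 - 1) == 0.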

inline
BOOL oddp (size_t integer)
{
    return (integer & 1) != 0;
}

// we only ever use this for WORDs.
size_t logcount (size_t word)
{
    //counts the number of set bits in a 16 bit word.
    assert (word < 0x10000);
    size_t count;
    count = (word & 0x5555) + ( (word >> 1 ) & 0x5555);
    count = (count & 0x3333) + ( (count >> 2) & 0x3333);
    count = (count & 0x0F0F) + ( (count >> 4) & 0x0F0F);
    count = (count & 0x00FF) + ( (count >> 8) & 0x00FF);
    return count;
}
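
// For illustration: logcount (0xF0F0) == 8 and logcount (0x0001) == 1 - this is a
// population count over the low 16 bits.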

#ifndef DACCESS_COMPILE

void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check)
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::StompResize;
    args.is_runtime_suspended = is_runtime_suspended;
    args.requires_upper_bounds_check = requires_upper_bounds_check;

    args.card_table = g_gc_card_table;
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    args.card_bundle_table = g_gc_card_bundle_table;
#endif

    args.lowest_address = g_gc_lowest_address;
    args.highest_address = g_gc_highest_address;

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (SoftwareWriteWatch::IsEnabledForGCHeap())
    {
        args.write_watch_table = g_gc_sw_ww_table;
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    GCToEEInterface::StompWriteBarrier(&args);
}

void stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::StompEphemeral;
    args.is_runtime_suspended = true;
    args.ephemeral_low = ephemeral_low;
    args.ephemeral_high = ephemeral_high;
    GCToEEInterface::StompWriteBarrier(&args);
}

void stomp_write_barrier_initialize(uint8_t* ephemeral_low, uint8_t* ephemeral_high)
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::Initialize;
    args.is_runtime_suspended = true;
    args.requires_upper_bounds_check = false;
    args.card_table = g_gc_card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    args.card_bundle_table = g_gc_card_bundle_table;
#endif

    args.lowest_address = g_gc_lowest_address;
    args.highest_address = g_gc_highest_address;
    args.ephemeral_low = ephemeral_low;
    args.ephemeral_high = ephemeral_high;
    GCToEEInterface::StompWriteBarrier(&args);
}

#endif // DACCESS_COMPILE

//extract the low bits [0, bits) of a uint32_t
#define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1))
//extract the high bits [bits, 32) of a uint32_t
#define highbits(wrd, bits) ((wrd) & ~((1 << (bits))-1))
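
// For illustration: lowbits (0xABCD, 8) == 0xCD and highbits (0xABCD, 8) == 0xAB00.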

// Things we need to manually initialize:
// gen0 min_size - based on cache
// gen0/1 max_size - based on segment size
static static_data static_data_table[latency_level_last - latency_level_first + 1][NUMBERGENERATIONS] =
{
    // latency_level_memory_footprint
    {
        // gen0
        {0, 0, 40000, 0.5f, 9.0f, 20.0f, 1000, 1},
        // gen1
        {160*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
        // gen2
        {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
        // gen3
        {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
    },

    // latency_level_balanced
    {
        // gen0
        {0, 0, 40000, 0.5f,
#ifdef MULTIPLE_HEAPS
            20.0f, 40.0f,
#else
            9.0f, 20.0f,
#endif //MULTIPLE_HEAPS
            1000, 1},
        // gen1
        {256*1024, 0, 80000, 0.5f, 2.0f, 7.0f, 10000, 10},
        // gen2
        {256*1024, SSIZE_T_MAX, 200000, 0.25f, 1.2f, 1.8f, 100000, 100},
        // gen3
        {3*1024*1024, SSIZE_T_MAX, 0, 0.0f, 1.25f, 4.5f, 0, 0}
    },
};

class mark;
class generation;
class heap_segment;
class CObjectHeader;
class dynamic_data;
class l_heap;
class sorted_table;
class c_synchronize;

#ifdef FEATURE_PREMORTEM_FINALIZATION
#ifndef DACCESS_COMPILE
static
HRESULT AllocateCFinalize(CFinalize **pCFinalize);
#endif //!DACCESS_COMPILE
#endif // FEATURE_PREMORTEM_FINALIZATION

uint8_t* tree_search (uint8_t* tree, uint8_t* old_address);


#ifdef USE_INTROSORT
#define _sort introsort::sort
#else //USE_INTROSORT
#define _sort qsort1
void qsort1(uint8_t** low, uint8_t** high, unsigned int depth);
#endif //USE_INTROSORT

void* virtual_alloc (size_t size);
void* virtual_alloc (size_t size, bool use_large_pages_p);
void virtual_free (void* add, size_t size);

/* per heap static initialization */
#ifdef MARK_ARRAY
#ifndef MULTIPLE_HEAPS
uint32_t*   gc_heap::mark_array;
#endif //MULTIPLE_HEAPS
#endif //MARK_ARRAY

#ifdef MARK_LIST
uint8_t**   gc_heap::g_mark_list;

#ifdef PARALLEL_MARK_LIST_SORT
uint8_t**   gc_heap::g_mark_list_copy;
#endif //PARALLEL_MARK_LIST_SORT

size_t      gc_heap::mark_list_size;
#endif //MARK_LIST

#ifdef SEG_MAPPING_TABLE
seg_mapping* seg_mapping_table;
#endif //SEG_MAPPING_TABLE

#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
sorted_table* gc_heap::seg_table;
#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE

#ifdef MULTIPLE_HEAPS
GCEvent     gc_heap::ee_suspend_event;
size_t      gc_heap::min_gen0_balance_delta = 0;
size_t      gc_heap::min_balance_threshold = 0;
#endif //MULTIPLE_HEAPS

VOLATILE(BOOL) gc_heap::gc_started;

#ifdef MULTIPLE_HEAPS

GCEvent     gc_heap::gc_start_event;
bool        gc_heap::gc_thread_no_affinitize_p = false;
uintptr_t   process_mask = 0;

int         gc_heap::n_heaps;

gc_heap**   gc_heap::g_heaps;

size_t*     gc_heap::g_promoted;

#ifdef MH_SC_MARK
int*        gc_heap::g_mark_stack_busy;
#endif //MH_SC_MARK


#ifdef BACKGROUND_GC
size_t*     gc_heap::g_bpromoted;
#endif //BACKGROUND_GC

#else  //MULTIPLE_HEAPS

size_t      gc_heap::g_promoted;

#ifdef BACKGROUND_GC
size_t      gc_heap::g_bpromoted;
#endif //BACKGROUND_GC

#endif //MULTIPLE_HEAPS

size_t      gc_heap::reserved_memory = 0;
size_t      gc_heap::reserved_memory_limit = 0;
BOOL        gc_heap::g_low_memory_status;

#ifndef DACCESS_COMPILE
static gc_reason gc_trigger_reason = reason_empty;
#endif //DACCESS_COMPILE

gc_latency_level gc_heap::latency_level = latency_level_default;

gc_mechanisms  gc_heap::settings;

gc_history_global gc_heap::gc_data_global;

size_t      gc_heap::gc_last_ephemeral_decommit_time = 0;

size_t      gc_heap::gc_gen0_desired_high;

CLRCriticalSection gc_heap::check_commit_cs;

size_t      gc_heap::current_total_committed = 0;

size_t      gc_heap::current_total_committed_bookkeeping = 0;

#ifdef SHORT_PLUGS
double       gc_heap::short_plugs_pad_ratio = 0;
#endif //SHORT_PLUGS

#if defined(BIT64)
#define MAX_ALLOWED_MEM_LOAD 85

// consider putting this in dynamic data -
// we may want different values for workstation
// and server GC.
#define MIN_YOUNGEST_GEN_DESIRED (16*1024*1024)

size_t      gc_heap::youngest_gen_desired_th;
#endif //BIT64

uint32_t    gc_heap::last_gc_memory_load = 0;

size_t      gc_heap::last_gc_heap_size = 0;

size_t      gc_heap::last_gc_fragmentation = 0;

uint64_t    gc_heap::mem_one_percent = 0;

uint32_t    gc_heap::high_memory_load_th = 0;

uint32_t    gc_heap::m_high_memory_load_th;

uint32_t    gc_heap::v_high_memory_load_th;

uint64_t    gc_heap::total_physical_mem = 0;

uint64_t    gc_heap::entry_available_physical_mem = 0;

size_t      gc_heap::heap_hard_limit = 0;

bool        affinity_config_specified_p = false;
#ifdef BACKGROUND_GC
GCEvent     gc_heap::bgc_start_event;

gc_mechanisms gc_heap::saved_bgc_settings;

GCEvent     gc_heap::background_gc_done_event;

GCEvent     gc_heap::ee_proceed_event;

bool        gc_heap::gc_can_use_concurrent = false;

bool        gc_heap::temp_disable_concurrent_p = false;

uint32_t    gc_heap::cm_in_progress = FALSE;

BOOL        gc_heap::dont_restart_ee_p = FALSE;

BOOL        gc_heap::keep_bgc_threads_p = FALSE;

GCEvent     gc_heap::bgc_threads_sync_event;

BOOL        gc_heap::do_ephemeral_gc_p = FALSE;

BOOL        gc_heap::do_concurrent_p = FALSE;

size_t      gc_heap::ephemeral_fgc_counts[max_generation];

BOOL        gc_heap::alloc_wait_event_p = FALSE;

VOLATILE(c_gc_state) gc_heap::current_c_gc_state = c_gc_state_free;

#endif //BACKGROUND_GC

#ifndef MULTIPLE_HEAPS
#ifdef SPINLOCK_HISTORY
int         gc_heap::spinlock_info_index = 0;
spinlock_info gc_heap::last_spinlock_info[max_saved_spinlock_info + 8];
#endif //SPINLOCK_HISTORY

uint32_t    gc_heap::fgn_maxgen_percent = 0;
size_t      gc_heap::fgn_last_alloc = 0;

int         gc_heap::generation_skip_ratio = 100;

uint64_t    gc_heap::loh_alloc_since_cg = 0;

BOOL        gc_heap::elevation_requested = FALSE;

BOOL        gc_heap::last_gc_before_oom = FALSE;

BOOL        gc_heap::sufficient_gen0_space_p = FALSE;

#ifdef BACKGROUND_GC
uint8_t*    gc_heap::background_saved_lowest_address = 0;
uint8_t*    gc_heap::background_saved_highest_address = 0;
uint8_t*    gc_heap::next_sweep_obj = 0;
uint8_t*    gc_heap::current_sweep_pos = 0;
exclusive_sync* gc_heap::bgc_alloc_lock;
#endif //BACKGROUND_GC

oom_history gc_heap::oom_info;

int         gc_heap::oomhist_index_per_heap = 0;

oom_history gc_heap::oomhist_per_heap[max_oom_history_count];

fgm_history gc_heap::fgm_result;

size_t      gc_heap::allocated_since_last_gc = 0;

BOOL        gc_heap::ro_segments_in_range;

size_t      gc_heap::gen0_big_free_spaces = 0;

uint8_t*    gc_heap::ephemeral_low;

uint8_t*    gc_heap::ephemeral_high;

uint8_t*    gc_heap::lowest_address;

uint8_t*    gc_heap::highest_address;

BOOL        gc_heap::ephemeral_promotion;

uint8_t*    gc_heap::saved_ephemeral_plan_start[NUMBERGENERATIONS-1];
size_t      gc_heap::saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1];

short*      gc_heap::brick_table;

uint32_t*   gc_heap::card_table;

#ifdef CARD_BUNDLE
uint32_t*   gc_heap::card_bundle_table;
#endif //CARD_BUNDLE

uint8_t*    gc_heap::gc_low;

uint8_t*    gc_heap::gc_high;

uint8_t*    gc_heap::demotion_low;

uint8_t*    gc_heap::demotion_high;

BOOL        gc_heap::demote_gen1_p = TRUE;

uint8_t*    gc_heap::last_gen1_pin_end;

gen_to_condemn_tuning gc_heap::gen_to_condemn_reasons;

size_t      gc_heap::etw_allocation_running_amount[2];

uint64_t    gc_heap::total_alloc_bytes_soh = 0;

uint64_t    gc_heap::total_alloc_bytes_loh = 0;

int         gc_heap::gc_policy = 0;

size_t      gc_heap::allocation_running_time;

size_t      gc_heap::allocation_running_amount;

heap_segment* gc_heap::ephemeral_heap_segment = 0;

BOOL        gc_heap::blocking_collection = FALSE;

heap_segment* gc_heap::freeable_large_heap_segment = 0;

size_t      gc_heap::time_bgc_last = 0;

size_t      gc_heap::mark_stack_tos = 0;

size_t      gc_heap::mark_stack_bos = 0;

size_t      gc_heap::mark_stack_array_length = 0;

mark*       gc_heap::mark_stack_array = 0;

#if defined (_DEBUG) && defined (VERIFY_HEAP)
BOOL        gc_heap::verify_pinned_queue_p = FALSE;
#endif // defined (_DEBUG) && defined (VERIFY_HEAP)

uint8_t*    gc_heap::oldest_pinned_plug = 0;

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
size_t      gc_heap::num_pinned_objects = 0;
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

#ifdef FEATURE_LOH_COMPACTION
size_t      gc_heap::loh_pinned_queue_tos = 0;

size_t      gc_heap::loh_pinned_queue_bos = 0;

size_t      gc_heap::loh_pinned_queue_length = 0;

mark*       gc_heap::loh_pinned_queue = 0;

BOOL        gc_heap::loh_compacted_p = FALSE;
#endif //FEATURE_LOH_COMPACTION

#ifdef BACKGROUND_GC

EEThreadId  gc_heap::bgc_thread_id;

uint8_t*    gc_heap::background_written_addresses [array_size+2];

heap_segment* gc_heap::freeable_small_heap_segment = 0;

size_t      gc_heap::bgc_overflow_count = 0;

size_t      gc_heap::bgc_begin_loh_size = 0;
size_t      gc_heap::end_loh_size = 0;

#ifdef BGC_SERVO_TUNING
uint64_t    gc_heap::loh_a_no_bgc = 0;

uint64_t    gc_heap::loh_a_bgc_marking = 0;

uint64_t    gc_heap::loh_a_bgc_planning = 0;

size_t      gc_heap::bgc_maxgen_end_fl_size = 0;
#endif //BGC_SERVO_TUNING

uint32_t    gc_heap::bgc_alloc_spin_loh = 0;

size_t      gc_heap::bgc_loh_size_increased = 0;

size_t      gc_heap::bgc_loh_allocated_in_free = 0;

size_t      gc_heap::background_soh_alloc_count = 0;

size_t      gc_heap::background_loh_alloc_count = 0;

uint8_t**   gc_heap::background_mark_stack_tos = 0;

uint8_t**   gc_heap::background_mark_stack_array = 0;

size_t      gc_heap::background_mark_stack_array_length = 0;

uint8_t*    gc_heap::background_min_overflow_address =0;

uint8_t*    gc_heap::background_max_overflow_address =0;

BOOL        gc_heap::processed_soh_overflow_p = FALSE;

uint8_t*    gc_heap::background_min_soh_overflow_address =0;

uint8_t*    gc_heap::background_max_soh_overflow_address =0;

heap_segment* gc_heap::saved_sweep_ephemeral_seg = 0;

uint8_t*    gc_heap::saved_sweep_ephemeral_start = 0;

heap_segment* gc_heap::saved_overflow_ephemeral_seg = 0;

Thread*     gc_heap::bgc_thread = 0;

BOOL        gc_heap::expanded_in_fgc = FALSE;

uint8_t**   gc_heap::c_mark_list = 0;

size_t      gc_heap::c_mark_list_length = 0;

size_t      gc_heap::c_mark_list_index = 0;

gc_history_per_heap gc_heap::bgc_data_per_heap;

BOOL    gc_heap::bgc_thread_running;

CLRCriticalSection gc_heap::bgc_threads_timeout_cs;

GCEvent gc_heap::gc_lh_block_event;

#endif //BACKGROUND_GC

#ifdef MARK_LIST
uint8_t**   gc_heap::mark_list;
uint8_t**   gc_heap::mark_list_index;
uint8_t**   gc_heap::mark_list_end;
#endif //MARK_LIST

#ifdef SNOOP_STATS
snoop_stats_data gc_heap::snoop_stat;
#endif //SNOOP_STATS

uint8_t*    gc_heap::min_overflow_address = MAX_PTR;

uint8_t*    gc_heap::max_overflow_address = 0;

uint8_t*    gc_heap::shigh = 0;

uint8_t*    gc_heap::slow = MAX_PTR;

size_t      gc_heap::ordered_free_space_indices[MAX_NUM_BUCKETS];

size_t      gc_heap::saved_ordered_free_space_indices[MAX_NUM_BUCKETS];

size_t      gc_heap::ordered_plug_indices[MAX_NUM_BUCKETS];

size_t      gc_heap::saved_ordered_plug_indices[MAX_NUM_BUCKETS];

BOOL        gc_heap::ordered_plug_indices_init = FALSE;

BOOL        gc_heap::use_bestfit = FALSE;

uint8_t*    gc_heap::bestfit_first_pin = 0;

BOOL        gc_heap::commit_end_of_seg = FALSE;

size_t      gc_heap::max_free_space_items = 0;

size_t      gc_heap::free_space_buckets = 0;

size_t      gc_heap::free_space_items = 0;

int         gc_heap::trimmed_free_space_index = 0;

size_t      gc_heap::total_ephemeral_plugs = 0;

seg_free_spaces* gc_heap::bestfit_seg = 0;

size_t      gc_heap::total_ephemeral_size = 0;

#ifdef HEAP_ANALYZE

size_t      gc_heap::internal_root_array_length = initial_internal_roots;

uint8_t**   gc_heap::internal_root_array = 0;

size_t      gc_heap::internal_root_array_index = 0;

BOOL        gc_heap::heap_analyze_success = TRUE;

uint8_t*    gc_heap::current_obj = 0;
size_t      gc_heap::current_obj_size = 0;

#endif //HEAP_ANALYZE

#ifdef GC_CONFIG_DRIVEN
size_t gc_heap::interesting_data_per_gc[max_idp_count];
//size_t gc_heap::interesting_data_per_heap[max_idp_count];
//size_t gc_heap::interesting_mechanisms_per_heap[max_im_count];
#endif //GC_CONFIG_DRIVEN
#endif //MULTIPLE_HEAPS

no_gc_region_info gc_heap::current_no_gc_region_info;
BOOL gc_heap::proceed_with_gc_p = FALSE;
GCSpinLock gc_heap::gc_lock;

#ifdef BGC_SERVO_TUNING
uint64_t gc_heap::total_loh_a_last_bgc = 0;
#endif //BGC_SERVO_TUNING

size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
bool          gc_heap::use_large_pages_p = false;
size_t        gc_heap::last_gc_index = 0;
#ifdef HEAP_BALANCE_INSTRUMENTATION
size_t        gc_heap::last_gc_end_time_ms = 0;
#endif //HEAP_BALANCE_INSTRUMENTATION
#ifdef SEG_MAPPING_TABLE
size_t        gc_heap::min_segment_size = 0;
size_t        gc_heap::min_segment_size_shr = 0;
#endif //SEG_MAPPING_TABLE
size_t        gc_heap::soh_segment_size = 0;
size_t        gc_heap::min_loh_segment_size = 0;
size_t        gc_heap::segment_info_size = 0;

#ifdef GC_CONFIG_DRIVEN
size_t gc_heap::time_init = 0;
size_t gc_heap::time_since_init = 0;
size_t gc_heap::compact_or_sweep_gcs[2];
#endif //GC_CONFIG_DRIVEN

#ifdef FEATURE_LOH_COMPACTION
BOOL                   gc_heap::loh_compaction_always_p = FALSE;
gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
int                    gc_heap::loh_pinned_queue_decay = LOH_PIN_DECAY;

#endif //FEATURE_LOH_COMPACTION

GCEvent gc_heap::full_gc_approach_event;

GCEvent gc_heap::full_gc_end_event;

uint32_t gc_heap::fgn_loh_percent = 0;

#ifdef BACKGROUND_GC
BOOL gc_heap::fgn_last_gc_was_concurrent = FALSE;
#endif //BACKGROUND_GC

VOLATILE(bool) gc_heap::full_gc_approach_event_set;

size_t gc_heap::full_gc_counts[gc_type_max];

bool gc_heap::maxgen_size_inc_p = false;

BOOL gc_heap::should_expand_in_full_gc = FALSE;

// Provisional mode related stuff.
bool gc_heap::provisional_mode_triggered = false;
bool gc_heap::pm_trigger_full_gc = false;
size_t gc_heap::provisional_triggered_gc_count = 0;
size_t gc_heap::provisional_off_gc_count = 0;
size_t gc_heap::num_provisional_triggered = 0;
bool   gc_heap::pm_stress_on = false;

#ifdef HEAP_ANALYZE
BOOL        gc_heap::heap_analyze_enabled = FALSE;
#endif //HEAP_ANALYZE

#ifndef MULTIPLE_HEAPS

alloc_list gc_heap::loh_alloc_list [NUM_LOH_ALIST-1];
alloc_list gc_heap::gen2_alloc_list[NUM_GEN2_ALIST-1];

dynamic_data gc_heap::dynamic_data_table [NUMBERGENERATIONS+1];
gc_history_per_heap gc_heap::gc_data_per_heap;
size_t gc_heap::maxgen_pinned_compact_before_advance = 0;

uint8_t* gc_heap::alloc_allocated = 0;

size_t gc_heap::allocation_quantum = CLR_SIZE;

GCSpinLock gc_heap::more_space_lock_soh;
GCSpinLock gc_heap::more_space_lock_loh;
VOLATILE(int32_t) gc_heap::loh_alloc_thread_count = 0;

#ifdef SYNCHRONIZATION_STATS
unsigned int gc_heap::good_suspension = 0;
unsigned int gc_heap::bad_suspension = 0;
uint64_t     gc_heap::total_msl_acquire = 0;
unsigned int gc_heap::num_msl_acquired = 0;
unsigned int gc_heap::num_high_msl_acquire = 0;
unsigned int gc_heap::num_low_msl_acquire = 0;
#endif //SYNCHRONIZATION_STATS

size_t   gc_heap::alloc_contexts_used = 0;
size_t   gc_heap::soh_allocation_no_gc = 0;
size_t   gc_heap::loh_allocation_no_gc = 0;
bool     gc_heap::no_gc_oom_p = false;
heap_segment* gc_heap::saved_loh_segment_no_gc = 0;

#endif //MULTIPLE_HEAPS

#ifndef MULTIPLE_HEAPS

BOOL        gc_heap::gen0_bricks_cleared = FALSE;

int         gc_heap::gen0_must_clear_bricks = 0;

#ifdef FEATURE_PREMORTEM_FINALIZATION
CFinalize*  gc_heap::finalize_queue = 0;
#endif // FEATURE_PREMORTEM_FINALIZATION

#ifdef FEATURE_CARD_MARKING_STEALING
VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_soh;
VOLATILE(bool) gc_heap::card_mark_done_soh;
VOLATILE(uint32_t) gc_heap::card_mark_chunk_index_loh;
VOLATILE(bool) gc_heap::card_mark_done_loh;
#endif // FEATURE_CARD_MARKING_STEALING

generation gc_heap::generation_table [NUMBERGENERATIONS + 1];

size_t     gc_heap::interesting_data_per_heap[max_idp_count];

size_t     gc_heap::compact_reasons_per_heap[max_compact_reasons_count];

size_t     gc_heap::expand_mechanisms_per_heap[max_expand_mechanisms_count];

size_t     gc_heap::interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];

#endif // MULTIPLE_HEAPS

/* end of per heap static initialization */

/* end of static initialization */

#ifndef DACCESS_COMPILE

void gen_to_condemn_tuning::print (int heap_num)
{
#ifdef DT_LOG
    dprintf (DT_LOG_0, ("condemned reasons (%d %d)", condemn_reasons_gen, condemn_reasons_condition));
    dprintf (DT_LOG_0, ("%s", record_condemn_reasons_gen_header));
    gc_condemn_reason_gen r_gen;
    for (int i = 0; i < gcrg_max; i++)
    {
        r_gen = (gc_condemn_reason_gen)(i);
        str_reasons_gen[i * 2] = get_gen_char (get_gen (r_gen));
    }
    dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_gen));

    dprintf (DT_LOG_0, ("%s", record_condemn_reasons_condition_header));
    gc_condemn_reason_condition r_condition;
    for (int i = 0; i < gcrc_max; i++)
    {
        r_condition = (gc_condemn_reason_condition)(i);
        str_reasons_condition[i * 2] = get_condition_char (get_condition (r_condition));
    }

    dprintf (DT_LOG_0, ("[%2d]%s", heap_num, str_reasons_condition));
#else
    UNREFERENCED_PARAMETER(heap_num);
#endif //DT_LOG
}

void gc_generation_data::print (int heap_num, int gen_num)
{
#if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
    dprintf (DT_LOG_0, ("[%2d]gen%d beg %Id fl %Id fo %Id end %Id fl %Id fo %Id in %Id p %Id np %Id alloc %Id",
                heap_num, gen_num,
                size_before,
                free_list_space_before, free_obj_space_before,
                size_after,
                free_list_space_after, free_obj_space_after,
                in, pinned_surv, npinned_surv,
                new_allocation));
#else
    UNREFERENCED_PARAMETER(heap_num);
    UNREFERENCED_PARAMETER(gen_num);
#endif //SIMPLE_DPRINTF && DT_LOG
}

void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, uint32_t value)
{
    uint32_t* mechanism = &mechanisms[mechanism_per_heap];
    *mechanism = 0;
    *mechanism |= mechanism_mask;
    *mechanism |= (1 << value);

#ifdef DT_LOG
    gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap];
    dprintf (DT_LOG_0, ("setting %s: %s",
            descr->name,
            (descr->descr)[value]));
#endif //DT_LOG
}

void gc_history_per_heap::print()
{
#if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
    for (int i = 0; i < (sizeof (gen_data)/sizeof (gc_generation_data)); i++)
    {
        gen_data[i].print (heap_index, i);
    }

    dprintf (DT_LOG_0, ("fla %Id flr %Id esa %Id ca %Id pa %Id paa %Id, rfle %d, ec %Id",
                    maxgen_size_info.free_list_allocated,
                    maxgen_size_info.free_list_rejected,
                    maxgen_size_info.end_seg_allocated,
                    maxgen_size_info.condemned_allocated,
                    maxgen_size_info.pinned_allocated,
                    maxgen_size_info.pinned_allocated_advance,
                    maxgen_size_info.running_free_list_efficiency,
                    extra_gen0_committed));

    int mechanism = 0;
    gc_mechanism_descr* descr = 0;

    for (int i = 0; i < max_mechanism_per_heap; i++)
    {
        mechanism = get_mechanism ((gc_mechanism_per_heap)i);

        if (mechanism >= 0)
        {
            descr = &gc_mechanisms_descr[(gc_mechanism_per_heap)i];
            dprintf (DT_LOG_0, ("[%2d]%s%s",
                        heap_index,
                        descr->name,
                        (descr->descr)[mechanism]));
        }
    }
#endif //SIMPLE_DPRINTF && DT_LOG
}

void gc_history_global::print()
{
#ifdef DT_LOG
    char str_settings[64];
    memset (str_settings, '|', sizeof (char) * 64);
    str_settings[max_global_mechanisms_count*2] = 0;

    for (int i = 0; i < max_global_mechanisms_count; i++)
    {
        str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 'Y' : 'N');
    }

    dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|"));

    dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings));
    dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d",
                        condemned_generation,
                        str_gc_reasons[reason],
                        str_gc_pause_modes[pause_mode],
                        final_youngest_desired,
                        gen0_reduction_count,
                        mem_pressure));
#endif //DT_LOG
}

void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num)
{
    maxgen_size_increase* maxgen_size_info = &(current_gc_data_per_heap->maxgen_size_info);
    FIRE_EVENT(GCPerHeapHistory_V3,
               (void *)(maxgen_size_info->free_list_allocated),
               (void *)(maxgen_size_info->free_list_rejected),
               (void *)(maxgen_size_info->end_seg_allocated),
               (void *)(maxgen_size_info->condemned_allocated),
               (void *)(maxgen_size_info->pinned_allocated),
               (void *)(maxgen_size_info->pinned_allocated_advance),
               maxgen_size_info->running_free_list_efficiency,
               current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(),
               current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(),
               current_gc_data_per_heap->mechanisms[gc_heap_compact],
               current_gc_data_per_heap->mechanisms[gc_heap_expand],
               current_gc_data_per_heap->heap_index,
               (void *)(current_gc_data_per_heap->extra_gen0_committed),
               (max_generation + 2),
               (uint32_t)(sizeof (gc_generation_data)),
               (void *)&(current_gc_data_per_heap->gen_data[0]));

    current_gc_data_per_heap->print();
    current_gc_data_per_heap->gen_to_condemn_reasons.print (heap_num);
}

void gc_heap::fire_pevents()
{
    settings.record (&gc_data_global);
    gc_data_global.print();

    FIRE_EVENT(GCGlobalHeapHistory_V2,
               gc_data_global.final_youngest_desired,
               gc_data_global.num_heaps,
               gc_data_global.condemned_generation,
               gc_data_global.gen0_reduction_count,
               gc_data_global.reason,
               gc_data_global.global_mechanisms_p,
               gc_data_global.pause_mode,
               gc_data_global.mem_pressure);

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
        fire_per_heap_hist_event (current_gc_data_per_heap, hp->heap_number);
    }
#else
    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
    fire_per_heap_hist_event (current_gc_data_per_heap, heap_number);
#endif
}

inline BOOL
gc_heap::dt_low_ephemeral_space_p (gc_tuning_point tp)
{
    BOOL ret = FALSE;

    switch (tp)
    {
        case tuning_deciding_condemned_gen:
        case tuning_deciding_compaction:
        case tuning_deciding_expansion:
        case tuning_deciding_full_gc:
        {
            ret = (!ephemeral_gen_fit_p (tp));
            break;
        }
        case tuning_deciding_promote_ephemeral:
        {
            size_t new_gen0size = approximate_new_allocation();
            ptrdiff_t plan_ephemeral_size = total_ephemeral_size;

            dprintf (GTC_LOG, ("h%d: plan eph size is %Id, new gen0 is %Id",
                heap_number, plan_ephemeral_size, new_gen0size));
            // If we were in a no_gc_region we could have allocated a larger than normal segment,
            // and the next seg we allocate will be a normal sized seg, so if we can't fit the new
            // ephemeral generations there, do an ephemeral promotion.
            ret = ((soh_segment_size - segment_info_size) < (plan_ephemeral_size + new_gen0size));
            break;
        }
        default:
            break;
    }

    return ret;
}

BOOL
gc_heap::dt_high_frag_p (gc_tuning_point tp,
                         int gen_number,
                         BOOL elevate_p)
{
    BOOL ret = FALSE;

    switch (tp)
    {
        case tuning_deciding_condemned_gen:
        {
            dynamic_data* dd = dynamic_data_of (gen_number);
            float fragmentation_burden = 0;

            if (elevate_p)
            {
                ret = (dd_fragmentation (dynamic_data_of (max_generation)) >= dd_max_size(dd));
                dprintf (GTC_LOG, ("h%d: frag is %Id, max size is %Id",
                    heap_number, dd_fragmentation (dd), dd_max_size(dd)));
            }
            else
            {
#ifndef MULTIPLE_HEAPS
                if (gen_number == max_generation)
                {
                    float frag_ratio = (float)(dd_fragmentation (dynamic_data_of (max_generation))) / (float)generation_size (max_generation);
                    if (frag_ratio > 0.65)
                    {
                        dprintf (GTC_LOG, ("g2 FR: %d%%", (int)(frag_ratio*100)));
                        return TRUE;
                    }
                }
#endif //!MULTIPLE_HEAPS
                size_t fr = generation_unusable_fragmentation (generation_of (gen_number));
                ret = (fr > dd_fragmentation_limit(dd));
                if (ret)
                {
                    fragmentation_burden = (float)fr / generation_size (gen_number);
                    ret = (fragmentation_burden > dd_v_fragmentation_burden_limit (dd));
                }
                dprintf (GTC_LOG, ("h%d: gen%d, frag is %Id, alloc effi: %d%%, unusable frag is %Id, ratio is %d",
                    heap_number, gen_number, dd_fragmentation (dd),
                    (int)(100*generation_allocator_efficiency (generation_of (gen_number))),
                    fr, (int)(fragmentation_burden*100)));
            }
            break;
        }
        default:
            break;
    }

    return ret;
}

inline BOOL
gc_heap::dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number)
{
    BOOL ret = FALSE;

    switch (tp)
    {
        case tuning_deciding_condemned_gen:
        {
            if (gen_number == max_generation)
            {
                size_t est_maxgen_free = estimated_reclaim (gen_number);

                uint32_t num_heaps = 1;
#ifdef MULTIPLE_HEAPS
                num_heaps = gc_heap::n_heaps;
#endif //MULTIPLE_HEAPS

                size_t min_frag_th = min_reclaim_fragmentation_threshold (num_heaps);
                dprintf (GTC_LOG, ("h%d, min frag is %Id", heap_number, min_frag_th));
                ret = (est_maxgen_free >= min_frag_th);
            }
            else
            {
                assert (0);
            }
            break;
        }

        default:
            break;
    }

    return ret;
}

// DTREVIEW: Right now we only estimate gen2 fragmentation.
// On 64-bit, though, we should consider gen1 or even gen0 fragmentation as well.
inline BOOL
gc_heap::dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem)
{
    BOOL ret = FALSE;

    switch (tp)
    {
        case tuning_deciding_condemned_gen:
        {
            if (gen_number == max_generation)
            {
                dynamic_data* dd = dynamic_data_of (gen_number);
                float est_frag_ratio = 0;
                if (dd_current_size (dd) == 0)
                {
                    est_frag_ratio = 1;
                }
                else if ((dd_fragmentation (dd) == 0) || (dd_fragmentation (dd) + dd_current_size (dd) == 0))
                {
                    est_frag_ratio = 0;
                }
                else
                {
                    est_frag_ratio = (float)dd_fragmentation (dd) / (float)(dd_fragmentation (dd) + dd_current_size (dd));
                }

                size_t est_frag = (dd_fragmentation (dd) + (size_t)((dd_desired_allocation (dd) - dd_new_allocation (dd)) * est_frag_ratio));
                dprintf (GTC_LOG, ("h%d: gen%d: current_size is %Id, frag is %Id, est_frag_ratio is %d%%, estimated frag is %Id",
                    heap_number,
                    gen_number,
                    dd_current_size (dd),
                    dd_fragmentation (dd),
                    (int)(est_frag_ratio*100),
                    est_frag));

                uint32_t num_heaps = 1;

#ifdef MULTIPLE_HEAPS
                num_heaps = gc_heap::n_heaps;
#endif //MULTIPLE_HEAPS
                uint64_t min_frag_th = min_high_fragmentation_threshold(available_mem, num_heaps);
                //dprintf (GTC_LOG, ("h%d, min frag is %I64d", heap_number, min_frag_th));
                ret = (est_frag >= min_frag_th);
            }
            else
            {
                assert (0);
            }
            break;
        }

        default:
            break;
    }

    return ret;
}

inline BOOL
gc_heap::dt_low_card_table_efficiency_p (gc_tuning_point tp)
{
    BOOL ret = FALSE;

    switch (tp)
    {
    case tuning_deciding_condemned_gen:
    {
        /* promote into max-generation if the card table has too many
         * generation faults besides the n -> 0
         */
        ret = (generation_skip_ratio < 30);
        break;
    }

    default:
        break;
    }

    return ret;
}

inline BOOL
in_range_for_segment(uint8_t* add, heap_segment* seg)
{
    return ((add >= heap_segment_mem (seg)) && (add < heap_segment_reserved (seg)));
}

#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
// The array we allocate is organized as follows:
// the 0th element is the address of the last array we allocated;
// starting from the 1st element are the segment addresses - that's
// what buckets() returns.
struct bk
{
    uint8_t* add;
    size_t val;
};

class sorted_table
{
private:
    ptrdiff_t size;
    ptrdiff_t count;
    bk* slots;
    bk* buckets() { return (slots + 1); }
    uint8_t*& last_slot (bk* arr) { return arr[0].add; }
    bk* old_slots;
public:
    static  sorted_table* make_sorted_table ();
    BOOL    insert (uint8_t* add, size_t val);
    size_t  lookup (uint8_t*& add);
    void    remove (uint8_t* add);
    void    clear ();
    void    delete_sorted_table();
    void    delete_old_slots();
    void    enqueue_old_slot(bk* sl);
    BOOL    ensure_space_for_insert();
};

sorted_table*
sorted_table::make_sorted_table ()
{
    size_t size = 400;

    // allocate one more bk to store the older slot address.
    sorted_table* res = (sorted_table*)new char [sizeof (sorted_table) + (size + 1) * sizeof (bk)];
    if (!res)
        return 0;
    res->size = size;
    res->slots = (bk*)(res + 1);
    res->old_slots = 0;
    res->clear();
    return res;
}

void
sorted_table::delete_sorted_table()
{
    if (slots != (bk*)(this+1))
    {
        delete slots;
    }
    delete_old_slots();
    delete this;
}
void
sorted_table::delete_old_slots()
{
    uint8_t* sl = (uint8_t*)old_slots;
    while (sl)
    {
        uint8_t* dsl = sl;
        sl = last_slot ((bk*)sl);
        delete dsl;
    }
    old_slots = 0;
}
void
sorted_table::enqueue_old_slot(bk* sl)
{
    last_slot (sl) = (uint8_t*)old_slots;
    old_slots = sl;
}
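
// lookup (sketch of the contract as implemented below): returns the value of the entry
// with the largest address that is <= add and updates add to that entry's address; if
// every entry is above add, it sets add to 0 and returns 0. The MAX_PTR sentinel that
// clear() installs keeps the buck[ti+1] probe in range.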

inline
size_t
sorted_table::lookup (uint8_t*& add)
{
    ptrdiff_t high = (count-1);
    ptrdiff_t low = 0;
    ptrdiff_t ti;
    ptrdiff_t mid;
    bk* buck = buckets();
    while (low <= high)
    {
        mid = ((low + high)/2);
        ti = mid;
        if (buck[ti].add > add)
        {
            if ((ti > 0) && (buck[ti-1].add <= add))
            {
                add = buck[ti-1].add;
                return buck[ti - 1].val;
            }
            high = mid - 1;
        }
        else
        {
            if (buck[ti+1].add > add)
            {
                add = buck[ti].add;
                return buck[ti].val;
            }
            low = mid + 1;
        }
    }
    add = 0;
    return 0;
}

BOOL
sorted_table::ensure_space_for_insert()
{
    if (count == size)
    {
        size = (size * 3)/2;
        assert((size * sizeof (bk)) > 0);
        bk* res = (bk*)new (nothrow) char [(size + 1) * sizeof (bk)];
        assert (res);
        if (!res)
            return FALSE;

        last_slot (res) = 0;
        memcpy (((bk*)res + 1), buckets(), count * sizeof (bk));
        bk* last_old_slots = slots;
        slots = res;
        if (last_old_slots != (bk*)(this + 1))
            enqueue_old_slot (last_old_slots);
    }
    return TRUE;
}

BOOL
sorted_table::insert (uint8_t* add, size_t val)
{
    //ensure_space_for_insert should have been called beforehand, so there is room
    assert (count < size);

    //insert sorted
    ptrdiff_t high = (count-1);
    ptrdiff_t low = 0;
    ptrdiff_t ti;
    ptrdiff_t mid;
    bk* buck = buckets();
    while (low <= high)
    {
        mid = ((low + high)/2);
        ti = mid;
        if (buck[ti].add > add)
        {
            if ((ti == 0) || (buck[ti-1].add <= add))
            {
                // found insertion point
                for (ptrdiff_t k = count; k > ti;k--)
                {
                    buck [k] = buck [k-1];
                }
                buck[ti].add = add;
                buck[ti].val = val;
                count++;
                return TRUE;
            }
            high = mid - 1;
        }
        else
        {
            if (buck[ti+1].add > add)
            {
                //found the insertion point
                for (ptrdiff_t k = count; k > ti+1;k--)
                {
                    buck [k] = buck [k-1];
                }
                buck[ti+1].add = add;
                buck[ti+1].val = val;
                count++;
                return TRUE;
            }
            low = mid + 1;
        }
    }
    assert (0);
    return TRUE;
}

void
sorted_table::remove (uint8_t* add)
{
    ptrdiff_t high = (count-1);
    ptrdiff_t low = 0;
    ptrdiff_t ti;
    ptrdiff_t mid;
    bk* buck = buckets();
    while (low <= high)
    {
        mid = ((low + high)/2);
        ti = mid;
        if (buck[ti].add > add)
        {
            if (buck[ti-1].add <= add)
            {
                // found the guy to remove
                for (ptrdiff_t k = ti; k < count; k++)
                    buck[k-1] = buck[k];
                count--;
                return;
            }
            high = mid - 1;
        }
        else
        {
            if (buck[ti+1].add > add)
            {
                // found the guy to remove
                for (ptrdiff_t k = ti+1; k < count; k++)
                    buck[k-1] = buck[k];
                count--;
                return;
            }
            low = mid + 1;
        }
    }
    assert (0);
}

void
sorted_table::clear()
{
    count = 1;
    buckets()[0].add = MAX_PTR;
}
#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE

#ifdef SEG_MAPPING_TABLE
#ifdef GROWABLE_SEG_MAPPING_TABLE
inline
uint8_t* align_on_segment (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + (gc_heap::min_segment_size - 1)) & ~(gc_heap::min_segment_size - 1));
}

inline
uint8_t* align_lower_segment (uint8_t* add)
{
    return (uint8_t*)((size_t)(add) & ~(gc_heap::min_segment_size - 1));
}

size_t size_seg_mapping_table_of (uint8_t* from, uint8_t* end)
{
    from = align_lower_segment (from);
    end = align_on_segment (end);
    dprintf (1, ("from: %Ix, end: %Ix, size: %Ix", from, end, sizeof (seg_mapping)*(((size_t)(end - from) >> gc_heap::min_segment_size_shr))));
    return sizeof (seg_mapping)*((size_t)(end - from) >> gc_heap::min_segment_size_shr);
}

// for the seg_mapping_table we want it to start at a pointer-size aligned address.
inline
size_t align_for_seg_mapping_table (size_t size)
{
    return ((size + (sizeof (uint8_t*) - 1)) &~ (sizeof (uint8_t*) - 1));
}

inline
size_t seg_mapping_word_of (uint8_t* add)
{
    return (size_t)add >> gc_heap::min_segment_size_shr;
}
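
// For illustration, assuming min_segment_size is 4MB (min_segment_size_shr == 22):
// every address inside the same 4MB-aligned range maps to the same table entry, e.g.
// seg_mapping_word_of ((uint8_t*)0x400000) == seg_mapping_word_of ((uint8_t*)0x7FFFFF) == 1.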
#else //GROWABLE_SEG_MAPPING_TABLE
BOOL seg_mapping_table_init()
{
#ifdef BIT64
    uint64_t total_address_space = (uint64_t)8*1024*1024*1024*1024;
#else
    uint64_t total_address_space = (uint64_t)4*1024*1024*1024;
#endif // BIT64

    size_t num_entries = (size_t)(total_address_space >> gc_heap::min_segment_size_shr);
    seg_mapping_table = new seg_mapping[num_entries];

    if (seg_mapping_table)
    {
        memset (seg_mapping_table, 0, num_entries * sizeof (seg_mapping));
        dprintf (1, ("created %d entries for heap mapping (%Id bytes)",
                     num_entries, (num_entries * sizeof (seg_mapping))));
        return TRUE;
    }
    else
    {
        dprintf (1, ("failed to create %d entries for heap mapping (%Id bytes)",
                     num_entries, (num_entries * sizeof (seg_mapping))));
        return FALSE;
    }
}
#endif //GROWABLE_SEG_MAPPING_TABLE

#ifdef FEATURE_BASICFREEZE
inline
size_t ro_seg_begin_index (heap_segment* seg)
{
    size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
    begin_index = max (begin_index, (size_t)g_gc_lowest_address >> gc_heap::min_segment_size_shr);
    return begin_index;
}

inline
size_t ro_seg_end_index (heap_segment* seg)
{
    size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) >> gc_heap::min_segment_size_shr;
    end_index = min (end_index, (size_t)g_gc_highest_address >> gc_heap::min_segment_size_shr);
    return end_index;
}

void seg_mapping_table_add_ro_segment (heap_segment* seg)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
    if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address))
        return;
#endif //GROWABLE_SEG_MAPPING_TABLE

    for (size_t entry_index = ro_seg_begin_index (seg); entry_index <= ro_seg_end_index (seg); entry_index++)
        seg_mapping_table[entry_index].seg1 = (heap_segment*)((size_t)seg_mapping_table[entry_index].seg1 | ro_in_entry);
}

void seg_mapping_table_remove_ro_segment (heap_segment* seg)
{
    UNREFERENCED_PARAMETER(seg);
#if 0
// POSSIBLE PERF TODO: right now we are not doing anything because we can't simply remove the flag. If it proves
// to be a perf problem, we can search in the current ro segs and see if any lands in this range and only
// remove the flag if none lands in this range.
#endif //0
}

heap_segment* ro_segment_lookup (uint8_t* o)
{
    uint8_t* ro_seg_start = o;
    heap_segment* seg = (heap_segment*)gc_heap::seg_table->lookup (ro_seg_start);

    if (ro_seg_start && in_range_for_segment (o, seg))
        return seg;
    else
        return 0;
}

#endif //FEATURE_BASICFREEZE

void gc_heap::seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp)
{
    size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
    size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
    seg_mapping* begin_entry = &seg_mapping_table[begin_index];
    size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
    seg_mapping* end_entry = &seg_mapping_table[end_index];

    dprintf (1, ("adding seg %Ix(%d)-%Ix(%d)",
        seg, begin_index, heap_segment_reserved (seg), end_index));

    dprintf (1, ("before add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
        begin_index, (seg_mapping_table[begin_index].boundary + 1),
        end_index, (seg_mapping_table[end_index].boundary + 1)));

#ifdef MULTIPLE_HEAPS
#ifdef SIMPLE_DPRINTF
    dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end %d: h0: %Ix(%d), h1: %Ix(%d)",
        begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
        (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
        end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
        (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
#endif //SIMPLE_DPRINTF
    assert (end_entry->boundary == 0);
    assert (end_entry->h0 == 0);
    end_entry->h0 = hp;
    assert (begin_entry->h1 == 0);
    begin_entry->h1 = hp;
#else
    UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS

    end_entry->boundary = (uint8_t*)seg_end;

    dprintf (1, ("set entry %d seg1 and %d seg0 to %Ix", begin_index, end_index, seg));
    assert ((begin_entry->seg1 == 0) || ((size_t)(begin_entry->seg1) == ro_in_entry));
    begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) | (size_t)seg);
    end_entry->seg0 = seg;

    // for every entry in between we need to set its heap too.
    for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
    {
        assert (seg_mapping_table[entry_index].boundary == 0);
#ifdef MULTIPLE_HEAPS
        assert (seg_mapping_table[entry_index].h0 == 0);
        seg_mapping_table[entry_index].h1 = hp;
#endif //MULTIPLE_HEAPS
        seg_mapping_table[entry_index].seg1 = seg;
    }

    dprintf (1, ("after add: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
        begin_index, (seg_mapping_table[begin_index].boundary + 1),
        end_index, (seg_mapping_table[end_index].boundary + 1)));
#if defined(MULTIPLE_HEAPS) && defined(SIMPLE_DPRINTF)
    dprintf (1, ("begin %d: h0: %Ix(%d), h1: %Ix(%d); end: %d h0: %Ix(%d), h1: %Ix(%d)",
        begin_index, (uint8_t*)(begin_entry->h0), (begin_entry->h0 ? begin_entry->h0->heap_number : -1),
        (uint8_t*)(begin_entry->h1), (begin_entry->h1 ? begin_entry->h1->heap_number : -1),
        end_index, (uint8_t*)(end_entry->h0), (end_entry->h0 ? end_entry->h0->heap_number : -1),
        (uint8_t*)(end_entry->h1), (end_entry->h1 ? end_entry->h1->heap_number : -1)));
#endif //MULTIPLE_HEAPS && SIMPLE_DPRINTF
}

void gc_heap::seg_mapping_table_remove_segment (heap_segment* seg)
{
    size_t seg_end = (size_t)(heap_segment_reserved (seg) - 1);
    size_t begin_index = (size_t)seg >> gc_heap::min_segment_size_shr;
    seg_mapping* begin_entry = &seg_mapping_table[begin_index];
    size_t end_index = seg_end >> gc_heap::min_segment_size_shr;
    seg_mapping* end_entry = &seg_mapping_table[end_index];
    dprintf (1, ("removing seg %Ix(%d)-%Ix(%d)",
        seg, begin_index, heap_segment_reserved (seg), end_index));

    assert (end_entry->boundary == (uint8_t*)seg_end);
    end_entry->boundary = 0;

#ifdef MULTIPLE_HEAPS
    gc_heap* hp = heap_segment_heap (seg);
    assert (end_entry->h0 == hp);
    end_entry->h0 = 0;
    assert (begin_entry->h1 == hp);
    begin_entry->h1 = 0;
#endif //MULTIPLE_HEAPS

    assert (begin_entry->seg1 != 0);
    begin_entry->seg1 = (heap_segment*)((size_t)(begin_entry->seg1) & ro_in_entry);
    end_entry->seg0 = 0;

    // for every entry in between we need to reset its heap too.
    for (size_t entry_index = (begin_index + 1); entry_index <= (end_index - 1); entry_index++)
    {
        assert (seg_mapping_table[entry_index].boundary == 0);
#ifdef MULTIPLE_HEAPS
        assert (seg_mapping_table[entry_index].h0 == 0);
        assert (seg_mapping_table[entry_index].h1 == hp);
        seg_mapping_table[entry_index].h1 = 0;
#endif //MULTIPLE_HEAPS
        seg_mapping_table[entry_index].seg1 = 0;
    }

    dprintf (1, ("after remove: begin entry%d: boundary: %Ix; end entry: %d: boundary: %Ix",
        begin_index, (seg_mapping_table[begin_index].boundary + 1),
        end_index, (seg_mapping_table[end_index].boundary + 1)));
#ifdef MULTIPLE_HEAPS
    dprintf (1, ("begin %d: h0: %Ix, h1: %Ix; end: %d h0: %Ix, h1: %Ix",
        begin_index, (uint8_t*)(begin_entry->h0), (uint8_t*)(begin_entry->h1),
        end_index, (uint8_t*)(end_entry->h0), (uint8_t*)(end_entry->h1)));
#endif //MULTIPLE_HEAPS
}

#ifdef MULTIPLE_HEAPS
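// Resolves which heap owns address o. The entry's boundary splits its chunk between the segment
// that ends in this chunk (h0/seg0, addresses <= boundary) and the segment that starts in it
// (h1/seg1, addresses > boundary). Callers are expected to have range-checked o against
// [g_gc_lowest_address, g_gc_highest_address) where that matters (see the wrappers below).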
inline
gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o)
{
    size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
    seg_mapping* entry = &seg_mapping_table[index];

    gc_heap* hp = ((o > entry->boundary) ? entry->h1 : entry->h0);

    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, h0: %Ix, seg0: %Ix, h1: %Ix, seg1: %Ix",
        o, index, (entry->boundary + 1),
        (uint8_t*)(entry->h0), (uint8_t*)(entry->seg0),
        (uint8_t*)(entry->h1), (uint8_t*)(entry->seg1)));

#ifdef _DEBUG
    heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
#ifdef FEATURE_BASICFREEZE
    if ((size_t)seg & ro_in_entry)
        seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
#endif //FEATURE_BASICFREEZE

    if (seg)
    {
        if (in_range_for_segment (o, seg))
        {
            dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, seg, (uint8_t*)heap_segment_allocated (seg)));
        }
        else
        {
            dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg",
                seg, (uint8_t*)heap_segment_allocated (seg), o));
        }
    }
    else
    {
        dprintf (2, ("could not find obj %Ix in any existing segments", o));
    }
#endif //_DEBUG

    return hp;
}

gc_heap* seg_mapping_table_heap_of (uint8_t* o)
{
#ifdef GROWABLE_SEG_MAPPING_TABLE
    if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
        return 0;
#endif //GROWABLE_SEG_MAPPING_TABLE

    return seg_mapping_table_heap_of_worker (o);
}

gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
    if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
        return 0;
#endif //FEATURE_BASICFREEZE && GROWABLE_SEG_MAPPING_TABLE

    return seg_mapping_table_heap_of_worker (o);
}
#endif //MULTIPLE_HEAPS

// Only returns a valid seg if we can actually find o on the seg.
heap_segment* seg_mapping_table_segment_of (uint8_t* o)
{
#if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE)
    if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address))
#ifdef FEATURE_BASICFREEZE
        return ro_segment_lookup (o);
#else
        return 0;
#endif //FEATURE_BASICFREEZE
#endif //FEATURE_BASICFREEZE && GROWABLE_SEG_MAPPING_TABLE

    size_t index = (size_t)o >> gc_heap::min_segment_size_shr;
    seg_mapping* entry = &seg_mapping_table[index];

    dprintf (2, ("checking obj %Ix, index is %Id, entry: boundary: %Ix, seg0: %Ix, seg1: %Ix",
        o, index, (entry->boundary + 1),
        (uint8_t*)(entry->seg0), (uint8_t*)(entry->seg1)));

    heap_segment* seg = ((o > entry->boundary) ? entry->seg1 : entry->seg0);
#ifdef FEATURE_BASICFREEZE
    if ((size_t)seg & ro_in_entry)
        seg = (heap_segment*)((size_t)seg & ~ro_in_entry);
#endif //FEATURE_BASICFREEZE

    if (seg)
    {
        if (in_range_for_segment (o, seg))
        {
            dprintf (2, ("obj %Ix belongs to segment %Ix(-%Ix)", o, (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg)));
        }
        else
        {
            dprintf (2, ("found seg %Ix(-%Ix) for obj %Ix, but it's not on the seg, setting it to 0",
                (uint8_t*)heap_segment_mem(seg), (uint8_t*)heap_segment_reserved(seg), o));
            seg = 0;
        }
    }
    else
    {
        dprintf (2, ("could not find obj %Ix in any existing segments", o));
    }

#ifdef FEATURE_BASICFREEZE
    // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro
    // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range.  I.e., it had an
    // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression.  However, at the moment, grow_brick_card_table does
    // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest)
    // range changes.  We should probably go ahead and modify grow_brick_card_table and put back the
    // "&& (size_t)(entry->seg1) & ro_in_entry" here.
    if (!seg)
    {
        seg = ro_segment_lookup (o);
        if (seg && !in_range_for_segment (o, seg))
            seg = 0;
    }
#endif //FEATURE_BASICFREEZE

    return seg;
}
#endif //SEG_MAPPING_TABLE

size_t gcard_of ( uint8_t*);

#define memref(i) *(uint8_t**)(i)

//GC Flags
#define GC_MARKED       (size_t)0x1
#define slot(i, j) ((uint8_t**)(i))[(j)+1]

#define free_object_base_size (plug_skew + sizeof(ArrayBase))

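// CObjectHeader overlays a managed object so the GC can manipulate its header state:
// the low bit of the method table pointer (GC_MARKED) serves as the mark bit, pinning is
// tracked via the GC reserve bit in the sync block, and free space is represented as a
// "free object" - an array-shaped object using g_gc_pFreeObjectMethodTable whose component
// count encodes how much space beyond free_object_base_size the free block covers.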
class CObjectHeader : public Object
{
public:

#if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
    // The GC expects the following methods that are provided by the Object class in the CLR but not provided
    // by Redhawk's version of Object.
    uint32_t GetNumComponents()
    {
        return ((ArrayBase *)this)->GetNumComponents();
    }

    void Validate(BOOL bDeep=TRUE, BOOL bVerifyNextHeader = TRUE)
    {
        UNREFERENCED_PARAMETER(bVerifyNextHeader);

        MethodTable * pMT = GetMethodTable();

        _ASSERTE(pMT->SanityCheck());

        bool noRangeChecks =
            (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_RANGE_CHECKS) == GCConfig::HEAPVERIFY_NO_RANGE_CHECKS;

        BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
        if (!noRangeChecks)
        {
            fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
            if (!fSmallObjectHeapPtr)
                fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);

            _ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
        }

#ifdef FEATURE_STRUCTALIGN
        _ASSERTE(IsStructAligned((uint8_t *)this, GetMethodTable()->GetBaseAlignment()));
#endif // FEATURE_STRUCTALIGN

#ifdef FEATURE_64BIT_ALIGNMENT
        if (pMT->RequiresAlign8())
        {
            _ASSERTE((((size_t)this) & 0x7) == (pMT->IsValueType() ? 4U : 0U));
        }
#endif // FEATURE_64BIT_ALIGNMENT

#ifdef VERIFY_HEAP
        if (bDeep && (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC))
            g_theGCHeap->ValidateObjectMember(this);
#endif
        if (fSmallObjectHeapPtr)
        {
#ifdef FEATURE_BASICFREEZE
            _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
#else
            _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
#endif
        }
    }

    void ValidatePromote(ScanContext *sc, uint32_t flags)
    {
        UNREFERENCED_PARAMETER(sc);
        UNREFERENCED_PARAMETER(flags);

        Validate();
    }

    void ValidateHeap(Object *from, BOOL bDeep)
    {
        UNREFERENCED_PARAMETER(from);

        Validate(bDeep, FALSE);
    }

#endif //FEATURE_REDHAWK || BUILD_AS_STANDALONE

    /////
    //
    // Header Status Information
    //

    MethodTable    *GetMethodTable() const
    {
        return( (MethodTable *) (((size_t) RawGetMethodTable()) & (~(GC_MARKED))));
    }

    void SetMarked()
    {
        RawSetMethodTable((MethodTable *) (((size_t) RawGetMethodTable()) | GC_MARKED));
    }

    BOOL IsMarked() const
    {
        return !!(((size_t)RawGetMethodTable()) & GC_MARKED);
    }

    void SetPinned()
    {
        assert (!(gc_heap::settings.concurrent));
        GetHeader()->SetGCBit();
    }

    BOOL IsPinned() const
    {
        return !!((((CObjectHeader*)this)->GetHeader()->GetBits()) & BIT_SBLK_GC_RESERVE);
    }

    void ClearMarked()
    {
        RawSetMethodTable( GetMethodTable() );
    }

    CGCDesc *GetSlotMap ()
    {
        assert (GetMethodTable()->ContainsPointers());
        return CGCDesc::GetCGCDescFromMT(GetMethodTable());
    }

    void SetFree(size_t size)
    {
        assert (size >= free_object_base_size);

        assert (g_gc_pFreeObjectMethodTable->GetBaseSize() == free_object_base_size);
        assert (g_gc_pFreeObjectMethodTable->RawGetComponentSize() == 1);

        RawSetMethodTable( g_gc_pFreeObjectMethodTable );

        size_t* numComponentsPtr = (size_t*) &((uint8_t*) this)[ArrayBase::GetOffsetOfNumComponents()];
        *numComponentsPtr = size - free_object_base_size;
#ifdef VERIFY_HEAP
        //This introduces a bug in the free list management.
        //((void**) this)[-1] = 0;    // clear the sync block,
        assert (*numComponentsPtr >= 0);
        if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
            memset (((uint8_t*)this)+sizeof(ArrayBase), 0xcc, *numComponentsPtr);
#endif //VERIFY_HEAP
    }

    void UnsetFree()
    {
        size_t size = free_object_base_size - plug_skew;

        // since we only need to clear 2 ptr size, we do it manually
        PTR_PTR m = (PTR_PTR) this;
        for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
            *(m++) = 0;
    }

    BOOL IsFree () const
    {
        return (GetMethodTable() == g_gc_pFreeObjectMethodTable);
    }

#ifdef FEATURE_STRUCTALIGN
    int GetRequiredAlignment () const
    {
        return GetMethodTable()->GetRequiredAlignment();
    }
#endif // FEATURE_STRUCTALIGN

    BOOL ContainsPointers() const
    {
        return GetMethodTable()->ContainsPointers();
    }

#ifdef COLLECTIBLE_CLASS
    BOOL Collectible() const
    {
        return GetMethodTable()->Collectible();
    }

    FORCEINLINE BOOL ContainsPointersOrCollectible() const
    {
        MethodTable *pMethodTable = GetMethodTable();
        return (pMethodTable->ContainsPointers() || pMethodTable->Collectible());
    }
#endif //COLLECTIBLE_CLASS

    Object* GetObjectBase() const
    {
        return (Object*) this;
    }
};

#define header(i) ((CObjectHeader*)(i))

#define free_list_slot(x) ((uint8_t**)(x))[2]
#define free_list_undo(x) ((uint8_t**)(x))[-1]
#define UNDO_EMPTY ((uint8_t*)1)

#ifdef SHORT_PLUGS
inline
void set_plug_padded (uint8_t* node)
{
    header(node)->SetMarked();
}
inline
void clear_plug_padded (uint8_t* node)
{
    header(node)->ClearMarked();
}
inline
BOOL is_plug_padded (uint8_t* node)
{
    return header(node)->IsMarked();
}
#else //SHORT_PLUGS
inline void set_plug_padded (uint8_t* node){}
inline void clear_plug_padded (uint8_t* node){}
inline
BOOL is_plug_padded (uint8_t* node){return FALSE;}
#endif //SHORT_PLUGS


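// Total size occupied by a free object: the fixed free-object overhead plus the component
// count stored at the ArrayBase num-components offset (see CObjectHeader::SetFree above).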
inline size_t unused_array_size(uint8_t * p)
{
    assert(((CObjectHeader*)p)->IsFree());

    size_t* numComponentsPtr = (size_t*)(p + ArrayBase::GetOffsetOfNumComponents());
    return free_object_base_size + *numComponentsPtr;
}

heap_segment* heap_segment_rw (heap_segment* ns)
{
    if ((ns == 0) || !heap_segment_read_only_p (ns))
    {
        return ns;
    }
    else
    {
        do
        {
            ns = heap_segment_next (ns);
        } while ((ns != 0) && heap_segment_read_only_p (ns));
        return ns;
    }
}

//returns the next non ro segment.
heap_segment* heap_segment_next_rw (heap_segment* seg)
{
    heap_segment* ns = heap_segment_next (seg);
    return heap_segment_rw (ns);
}

// returns the segment before seg.
heap_segment* heap_segment_prev_rw (heap_segment* begin, heap_segment* seg)
{
    assert (begin != 0);
    heap_segment* prev = begin;
    heap_segment* current = heap_segment_next_rw (begin);

    while (current && current != seg)
    {
        prev = current;
        current = heap_segment_next_rw (current);
    }

    if (current == seg)
    {
        return prev;
    }
    else
    {
        return 0;
    }
}

// returns the segment before seg.
heap_segment* heap_segment_prev (heap_segment* begin, heap_segment* seg)
{
    assert (begin != 0);
    heap_segment* prev = begin;
    heap_segment* current = heap_segment_next (begin);

    while (current && current != seg)
    {
        prev = current;
        current = heap_segment_next (current);
    }

    if (current == seg)
    {
        return prev;
    }
    else
    {
        return 0;
    }
}

heap_segment* heap_segment_in_range (heap_segment* ns)
{
    if ((ns == 0) || heap_segment_in_range_p (ns))
    {
        return ns;
    }
    else
    {
        do
        {
            ns = heap_segment_next (ns);
        } while ((ns != 0) && !heap_segment_in_range_p (ns));
        return ns;
    }
}

heap_segment* heap_segment_next_in_range (heap_segment* seg)
{
    heap_segment* ns = heap_segment_next (seg);
    return heap_segment_in_range (ns);
}

typedef struct
{
    uint8_t* memory_base;
} imemory_data;

typedef struct
{
    imemory_data *initial_memory;
    imemory_data *initial_normal_heap; // points into initial_memory_array
    imemory_data *initial_large_heap;  // points into initial_memory_array

    size_t block_size_normal;
    size_t block_size_large;

    int block_count;                // # of blocks in each
    int current_block_normal;
    int current_block_large;

    enum
    {
        ALLATONCE = 1,
        TWO_STAGE,
        EACH_BLOCK
    };

    size_t allocation_pattern;
} initial_memory_details;

initial_memory_details memory_details;

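// Reserves the initial address space for all heaps, trying the cheapest layout first:
// ALLATONCE reserves one contiguous block covering every normal and large block; if that
// fails, TWO_STAGE reserves one block for all normal heaps and one for all large heaps;
// if that also fails (and there is more than one heap), EACH_BLOCK reserves every block
// individually, freeing whatever has been reserved so far if any individual reservation fails.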
BOOL reserve_initial_memory (size_t normal_size, size_t large_size, int num_heaps, bool use_large_pages_p)
{
    BOOL reserve_success = FALSE;

    // should only be called once
    assert (memory_details.initial_memory == 0);

    memory_details.initial_memory = new (nothrow) imemory_data[num_heaps * 2];
    if (memory_details.initial_memory == 0)
    {
        dprintf (2, ("failed to reserve %Id bytes for imemory_data", num_heaps * 2 * sizeof (imemory_data)));
        return FALSE;
    }

    memory_details.initial_normal_heap = memory_details.initial_memory;
    memory_details.initial_large_heap = memory_details.initial_memory + num_heaps;
    memory_details.block_size_normal = normal_size;
    memory_details.block_size_large = large_size;
    memory_details.block_count = num_heaps;

    memory_details.current_block_normal = 0;
    memory_details.current_block_large = 0;

    g_gc_lowest_address = MAX_PTR;
    g_gc_highest_address = 0;

    if (((size_t)MAX_PTR - large_size) < normal_size)
    {
        // we are already overflowing with just one heap.
        dprintf (2, ("0x%Ix + 0x%Ix already overflow", normal_size, large_size));
        return FALSE;
    }

    if (((size_t)MAX_PTR / memory_details.block_count) < (normal_size + large_size))
    {
        dprintf (2, ("(0x%Ix + 0x%Ix)*0x%Ix overflow", normal_size, large_size, memory_details.block_count));
        return FALSE;
    }

    size_t requestedMemory = memory_details.block_count * (normal_size + large_size);

    uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory, use_large_pages_p);
    if (allatonce_block)
    {
        g_gc_lowest_address = allatonce_block;
        g_gc_highest_address = allatonce_block + requestedMemory;
        memory_details.allocation_pattern = initial_memory_details::ALLATONCE;

        for (int i = 0; i < memory_details.block_count; i++)
        {
            memory_details.initial_normal_heap[i].memory_base = allatonce_block + (i * normal_size);
            memory_details.initial_large_heap[i].memory_base = allatonce_block +
                (memory_details.block_count * normal_size) + (i * large_size);
            reserve_success = TRUE;
        }
    }
    else
    {
        // try to allocate 2 blocks
        uint8_t* b1 = 0;
        uint8_t* b2 = 0;
        b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size, use_large_pages_p);
        if (b1)
        {
            b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size, use_large_pages_p);
            if (b2)
            {
                memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
                g_gc_lowest_address = min (b1, b2);
                g_gc_highest_address = max (b1 + memory_details.block_count * normal_size,
                    b2 + memory_details.block_count * large_size);
                for (int i = 0; i < memory_details.block_count; i++)
                {
                    memory_details.initial_normal_heap[i].memory_base = b1 + (i * normal_size);
                    memory_details.initial_large_heap[i].memory_base = b2 + (i * large_size);
                    reserve_success = TRUE;
                }
            }
            else
            {
                // b2 allocation failed, we'll go on to try allocating each block.
                // We could preserve the b1 alloc, but code complexity increases
                virtual_free (b1, memory_details.block_count * normal_size);
            }
        }

        if ((b2 == NULL) && (memory_details.block_count > 1))
        {
            memory_details.allocation_pattern = initial_memory_details::EACH_BLOCK;

            imemory_data* current_block = memory_details.initial_memory;
            for (int i = 0; i < (memory_details.block_count * 2); i++, current_block++)
            {
                size_t block_size = ((i < memory_details.block_count) ?
                    memory_details.block_size_normal :
                    memory_details.block_size_large);
                current_block->memory_base =
                    (uint8_t*)virtual_alloc (block_size, use_large_pages_p);
                if (current_block->memory_base == 0)
                {
                    // Free the blocks that we've allocated so far
                    current_block = memory_details.initial_memory;
                    for (int j = 0; j < i; j++, current_block++) {
                        if (current_block->memory_base != 0) {
                            block_size = ((j < memory_details.block_count) ?
                                memory_details.block_size_normal :
                                memory_details.block_size_large);
                            virtual_free (current_block->memory_base, block_size);
                        }
                    }
                    reserve_success = FALSE;
                    break;
                }
                else
                {
                    if (current_block->memory_base < g_gc_lowest_address)
                        g_gc_lowest_address = current_block->memory_base;
                    if (((uint8_t*)current_block->memory_base + block_size) > g_gc_highest_address)
                        g_gc_highest_address = (current_block->memory_base + block_size);
                }
                reserve_success = TRUE;
            }
        }
    }

    return reserve_success;
}

void destroy_initial_memory()
{
    if (memory_details.initial_memory != NULL)
    {
        if (memory_details.allocation_pattern == initial_memory_details::ALLATONCE)
        {
            virtual_free(memory_details.initial_memory[0].memory_base,
                memory_details.block_count*(memory_details.block_size_normal +
                memory_details.block_size_large));
        }
        else if (memory_details.allocation_pattern == initial_memory_details::TWO_STAGE)
        {
            virtual_free (memory_details.initial_normal_heap[0].memory_base,
                memory_details.block_count*memory_details.block_size_normal);

            virtual_free (memory_details.initial_large_heap[0].memory_base,
                memory_details.block_count*memory_details.block_size_large);
        }
        else
        {
            assert (memory_details.allocation_pattern == initial_memory_details::EACH_BLOCK);
            imemory_data *current_block = memory_details.initial_memory;
            for(int i = 0; i < (memory_details.block_count*2); i++, current_block++)
            {
                size_t block_size = (i < memory_details.block_count) ? memory_details.block_size_normal :
                                                                       memory_details.block_size_large;
                if (current_block->memory_base != NULL)
                {
                    virtual_free (current_block->memory_base, block_size);
                }
            }
        }

        delete [] memory_details.initial_memory;
        memory_details.initial_memory = NULL;
        memory_details.initial_normal_heap = NULL;
        memory_details.initial_large_heap = NULL;
    }
}

void* next_initial_memory (size_t size)
{
    assert ((size == memory_details.block_size_normal) || (size == memory_details.block_size_large));
    void *res = NULL;

    if ((size != memory_details.block_size_normal) ||
        ((memory_details.current_block_normal == memory_details.block_count) &&
         (memory_details.block_size_normal == memory_details.block_size_large)))
    {
        // If the block sizes are the same, flow block requests from normal to large
        assert (memory_details.current_block_large < memory_details.block_count);
        assert (memory_details.initial_large_heap != 0);

        res = memory_details.initial_large_heap[memory_details.current_block_large].memory_base;
        memory_details.current_block_large++;
    }
    else
    {
        assert (memory_details.current_block_normal < memory_details.block_count);
        assert (memory_details.initial_normal_heap != NULL);

        res = memory_details.initial_normal_heap[memory_details.current_block_normal].memory_base;
        memory_details.current_block_normal++;
    }

    return res;
}

heap_segment* get_initial_segment (size_t size, int h_number)
{
    void* mem = next_initial_memory (size);
    heap_segment* res = gc_heap::make_heap_segment ((uint8_t*)mem, size , h_number);

    return res;
}

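// Reserves (but does not commit) address space for the GC. This honors the reserved memory
// limit (asking for more reserved memory if needed), aligns the reservation to the card table
// granularity (card_size * card_word_width) in the non-large-pages path, and rejects
// reservations that end too close to the top of the address space (see the END_SPACE_AFTER_GC
// check below).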
void* virtual_alloc (size_t size)
{
    return virtual_alloc(size, false);
}

void* virtual_alloc (size_t size, bool use_large_pages_p)
{
    size_t requested_size = size;

    if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
    {
        gc_heap::reserved_memory_limit =
            GCScan::AskForMoreReservedMemory (gc_heap::reserved_memory_limit, requested_size);
        if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
        {
            return 0;
        }
    }

    uint32_t flags = VirtualReserveFlags::None;
#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (virtual_alloc_hardware_write_watch)
    {
        flags = VirtualReserveFlags::WriteWatch;
    }
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    void* prgmem = use_large_pages_p ?
        GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size) :
        GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags);
    void *aligned_mem = prgmem;

    // We don't want (prgmem + size) to be right at the end of the address space
    // because we'd have to worry about that every time we do (address + size).
    // We also want to make sure that we leave loh_size_threshold at the end
    // so that when we allocate a small object we don't need to worry about overflow
    // when we do alloc_ptr + size.
    if (prgmem)
    {
        uint8_t* end_mem = (uint8_t*)prgmem + requested_size;

        if ((end_mem == 0) || ((size_t)(MAX_PTR - end_mem) <= END_SPACE_AFTER_GC))
        {
            GCToOSInterface::VirtualRelease (prgmem, requested_size);
            dprintf (2, ("Virtual Alloc size %Id returned memory right against 4GB [%Ix, %Ix[ - discarding",
                        requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));
            prgmem = 0;
            aligned_mem = 0;
        }
    }

    if (prgmem)
    {
        gc_heap::reserved_memory += requested_size;
    }

    dprintf (2, ("Virtual Alloc size %Id: [%Ix, %Ix[",
                 requested_size, (size_t)prgmem, (size_t)((uint8_t*)prgmem+requested_size)));

    return aligned_mem;
}

void virtual_free (void* add, size_t size)
{
    GCToOSInterface::VirtualRelease (add, size);
    gc_heap::reserved_memory -= size;
    dprintf (2, ("Virtual Free size %Id: [%Ix, %Ix[",
                 size, (size_t)add, (size_t)((uint8_t*)add+size)));
}

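// Picks the segment size for new SOH (or, with large_seg, LOH) segments: start from the
// configured segment size (or the built-in default), shrink the built-in default on machines
// with many processors when using multiple heaps, clamp clearly-too-small requests up to 4MB,
// and - when the segment mapping table is used - round to a power of two (up on 64-bit, down
// on 32-bit) so the shift-based table indexing works.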
static size_t get_valid_segment_size (BOOL large_seg=FALSE)
{
    size_t seg_size, initial_seg_size;

    if (!large_seg)
    {
        initial_seg_size = INITIAL_ALLOC;
        seg_size = static_cast<size_t>(GCConfig::GetSegmentSize());
    }
    else
    {
        initial_seg_size = LHEAP_ALLOC;
        seg_size = static_cast<size_t>(GCConfig::GetSegmentSize()) / 2;
    }

#ifdef MULTIPLE_HEAPS
#ifdef BIT64
    if (!large_seg)
#endif // BIT64
    {
        if (g_num_processors > 4)
            initial_seg_size /= 2;
        if (g_num_processors > 8)
            initial_seg_size /= 2;
    }
#endif //MULTIPLE_HEAPS

    // if seg_size is small but not 0 (0 is default if config not set)
    // then set the segment to the minimum size
    if (!g_theGCHeap->IsValidSegmentSize(seg_size))
    {
        // if requested size is between 1 byte and 4MB, use min
        if ((seg_size >> 1) && !(seg_size >> 22))
            seg_size = 1024*1024*4;
        else
            seg_size = initial_seg_size;
    }

#ifdef SEG_MAPPING_TABLE
#ifdef BIT64
    seg_size = round_up_power2 (seg_size);
#else
    seg_size = round_down_power2 (seg_size);
#endif // BIT64
#endif //SEG_MAPPING_TABLE

    return (seg_size);
}

void
gc_heap::compute_new_ephemeral_size()
{
    int eph_gen_max = max_generation - 1 - (settings.promotion ? 1 : 0);
    size_t padding_size = 0;

    for (int i = 0; i <= eph_gen_max; i++)
    {
        dynamic_data* dd = dynamic_data_of (i);
        total_ephemeral_size += (dd_survived_size (dd) - dd_pinned_survived_size (dd));
#ifdef RESPECT_LARGE_ALIGNMENT
        total_ephemeral_size += dd_num_npinned_plugs (dd) * switch_alignment_size (FALSE);
#endif //RESPECT_LARGE_ALIGNMENT
#ifdef FEATURE_STRUCTALIGN
        total_ephemeral_size += dd_num_npinned_plugs (dd) * MAX_STRUCTALIGN;
#endif //FEATURE_STRUCTALIGN

#ifdef SHORT_PLUGS
        padding_size += dd_padding_size (dd);
#endif //SHORT_PLUGS
    }

    total_ephemeral_size += eph_gen_starts_size;

#ifdef RESPECT_LARGE_ALIGNMENT
    size_t planned_ephemeral_size = heap_segment_plan_allocated (ephemeral_heap_segment) -
                                       generation_plan_allocation_start (generation_of (max_generation-1));
    total_ephemeral_size = min (total_ephemeral_size, planned_ephemeral_size);
#endif //RESPECT_LARGE_ALIGNMENT

#ifdef SHORT_PLUGS
    total_ephemeral_size = Align ((size_t)((double)total_ephemeral_size * short_plugs_pad_ratio) + 1);
    total_ephemeral_size += Align (DESIRED_PLUG_LENGTH);
#endif //SHORT_PLUGS

    dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)",
        total_ephemeral_size,
        padding_size, (total_ephemeral_size - padding_size)));
}

#ifdef _MSC_VER
#pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
#endif // _MSC_VER

heap_segment*
gc_heap::soh_get_segment_to_expand()
{
    size_t size = soh_segment_size;

    ordered_plug_indices_init = FALSE;
    use_bestfit = FALSE;

    //compute the size of the new ephemeral heap segment.
    compute_new_ephemeral_size();

    if ((settings.pause_mode != pause_low_latency) &&
        (settings.pause_mode != pause_no_gc)
#ifdef BACKGROUND_GC
        && (!recursive_gc_sync::background_running_p())
#endif //BACKGROUND_GC
        )
    {
        allocator*  gen_alloc = ((settings.condemned_generation == max_generation) ? 0 :
                              generation_allocator (generation_of (max_generation)));
        dprintf (2, ("(gen%d)soh_get_segment_to_expand", settings.condemned_generation));

        // try to find one in the gen 2 segment list, search backwards because the first segments
        // tend to be more compact than the later ones.
        heap_segment* fseg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));

        PREFIX_ASSUME(fseg != NULL);

#ifdef SEG_REUSE_STATS
        int try_reuse = 0;
#endif //SEG_REUSE_STATS

        heap_segment* seg = ephemeral_heap_segment;
        while ((seg = heap_segment_prev_rw (fseg, seg)) && (seg != fseg))
        {
#ifdef SEG_REUSE_STATS
            try_reuse++;
#endif //SEG_REUSE_STATS

            if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc))
            {
                get_gc_data_per_heap()->set_mechanism (gc_heap_expand,
                    (use_bestfit ? expand_reuse_bestfit : expand_reuse_normal));
                if (settings.condemned_generation == max_generation)
                {
                    if (use_bestfit)
                    {
                        build_ordered_free_spaces (seg);
                        dprintf (GTC_LOG, ("can use best fit"));
                    }

#ifdef SEG_REUSE_STATS
                    dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse",
                        settings.condemned_generation, try_reuse));
#endif //SEG_REUSE_STATS
                    dprintf (GTC_LOG, ("max_gen: Found existing segment to expand into %Ix", (size_t)seg));
                    return seg;
                }
                else
                {
#ifdef SEG_REUSE_STATS
                    dprintf (SEG_REUSE_LOG_0, ("(gen%d)soh_get_segment_to_expand: found seg #%d to reuse - returning",
                        settings.condemned_generation, try_reuse));
#endif //SEG_REUSE_STATS
                    dprintf (GTC_LOG, ("max_gen-1: Found existing segment to expand into %Ix", (size_t)seg));

                    // If we return 0 here, the allocator will think since we are short on end
                    // of seg we need to trigger a full compacting GC. So if sustained low latency
                    // is set we should acquire a new seg instead, that way we wouldn't be short.
                    // The real solution, of course, is to actually implement seg reuse in gen1.
                    if (settings.pause_mode != pause_sustained_low_latency)
                    {
                        dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is not set, defer to the next full compacting GC"));
                        get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc);
                        return 0;
                    }
                }
            }
        }
    }

    heap_segment* result = get_segment (size, FALSE);

    if(result)
    {
#ifdef BACKGROUND_GC
        if (current_c_gc_state == c_gc_state_planning)
        {
            // When we expand heap during bgc sweep, we set the seg to be swept so
            // we'll always look at cards for objects on the new segment.
            result->flags |= heap_segment_flags_swept;
        }
#endif //BACKGROUND_GC

        FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(result),
                                  (size_t)(heap_segment_reserved (result) - heap_segment_mem(result)),
                                  gc_etw_segment_small_object_heap);
    }

    get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));

    if (result == 0)
    {
        dprintf (2, ("h%d: failed to allocate a new segment!", heap_number));
    }
    else
    {
#ifdef MULTIPLE_HEAPS
        heap_segment_heap (result) = this;
#endif //MULTIPLE_HEAPS
    }

    dprintf (GTC_LOG, ("(gen%d)creating new segment %Ix", settings.condemned_generation, result));
    return result;
}

#ifdef _MSC_VER
#pragma warning(default:4706)
#endif // _MSC_VER

//returns 0 in case of allocation failure
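// First tries to reuse a segment from the standby (hoarded) list - a candidate is taken only
// if it is large enough for the request but not more than roughly twice its size - and
// otherwise reserves a fresh segment, growing the brick/card tables if the new memory falls
// outside the current [g_gc_lowest_address, g_gc_highest_address) range. When a hard heap
// limit is in effect, no new segments are handed out here at all.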
heap_segment*
gc_heap::get_segment (size_t size, BOOL loh_p)
{
    if (heap_hard_limit)
        return NULL;

    heap_segment* result = 0;

    if (segment_standby_list != 0)
    {
        result = segment_standby_list;
        heap_segment* last = 0;
        while (result)
        {
            size_t hs = (size_t)(heap_segment_reserved (result) - (uint8_t*)result);
            if ((hs >= size) && ((hs / 2) < size))
            {
                dprintf (2, ("Hoarded segment %Ix found", (size_t) result));
                if (last)
                {
                    heap_segment_next (last) = heap_segment_next (result);
                }
                else
                {
                    segment_standby_list = heap_segment_next (result);
                }
                break;
            }
            else
            {
                last = result;
                result = heap_segment_next (result);
            }
        }
    }

    if (result)
    {
        init_heap_segment (result);
#ifdef BACKGROUND_GC
        if (should_commit_mark_array())
        {
            dprintf (GC_TABLE_LOG, ("hoarded seg %Ix, mark_array is %Ix", result, mark_array));
            if (!commit_mark_array_new_seg (__this, result))
            {
                dprintf (GC_TABLE_LOG, ("failed to commit mark array for hoarded seg"));
                // If we can't use it we need to thread it back.
                if (segment_standby_list != 0)
                {
                    heap_segment_next (result) = segment_standby_list;
                    segment_standby_list = result;
                }
                else
                {
                    segment_standby_list = result;
                }

                result = 0;
            }
        }
#endif //BACKGROUND_GC

#ifdef SEG_MAPPING_TABLE
        if (result)
            seg_mapping_table_add_segment (result, __this);
#endif //SEG_MAPPING_TABLE
    }

    if (!result)
    {
#ifndef SEG_MAPPING_TABLE
        if (!seg_table->ensure_space_for_insert ())
            return 0;
#endif //SEG_MAPPING_TABLE
        void* mem = virtual_alloc (size);
        if (!mem)
        {
            fgm_result.set_fgm (fgm_reserve_segment, size, loh_p);
            return 0;
        }

        result = gc_heap::make_heap_segment ((uint8_t*)mem, size, heap_number);

        if (result)
        {
            uint8_t* start;
            uint8_t* end;
            if (mem < g_gc_lowest_address)
            {
                start =  (uint8_t*)mem;
            }
            else
            {
                start = (uint8_t*)g_gc_lowest_address;
            }

            if (((uint8_t*)mem + size) > g_gc_highest_address)
            {
                end = (uint8_t*)mem + size;
            }
            else
            {
                end = (uint8_t*)g_gc_highest_address;
            }

            if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0)
            {
                virtual_free (mem, size);
                return 0;
            }
        }
        else
        {
            fgm_result.set_fgm (fgm_commit_segment_beg, SEGMENT_INITIAL_COMMIT, loh_p);
            virtual_free (mem, size);
        }

        if (result)
        {
#ifdef SEG_MAPPING_TABLE
            seg_mapping_table_add_segment (result, __this);
#else //SEG_MAPPING_TABLE
            gc_heap::seg_table->insert ((uint8_t*)result, delta);
#endif //SEG_MAPPING_TABLE
        }
    }

#ifdef BACKGROUND_GC
    if (result)
    {
        ::record_changed_seg ((uint8_t*)result, heap_segment_reserved (result),
                            settings.gc_index, current_bgc_state,
                            seg_added);
        bgc_verify_mark_array_cleared (result);
    }
#endif //BACKGROUND_GC

    dprintf (GC_TABLE_LOG, ("h%d: new seg: %Ix-%Ix (%Id)", heap_number, result, ((uint8_t*)result + size), size));
    return result;
}

void release_segment (heap_segment* sg)
{
    ptrdiff_t delta = 0;
    FIRE_EVENT(GCFreeSegment_V1, heap_segment_mem(sg));
    virtual_free (sg, (uint8_t*)heap_segment_reserved (sg)-(uint8_t*)sg);
}

heap_segment* gc_heap::get_segment_for_loh (size_t size
#ifdef MULTIPLE_HEAPS
                                           , gc_heap* hp
#endif //MULTIPLE_HEAPS
                                           )
{
#ifndef MULTIPLE_HEAPS
    gc_heap* hp = 0;
#endif //MULTIPLE_HEAPS
    heap_segment* res = hp->get_segment (size, TRUE);
    if (res != 0)
    {
#ifdef MULTIPLE_HEAPS
        heap_segment_heap (res) = hp;
#endif //MULTIPLE_HEAPS
        res->flags |= heap_segment_flags_loh;

        FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), gc_etw_segment_large_object_heap);

        GCToEEInterface::DiagUpdateGenerationBounds();

#ifdef MULTIPLE_HEAPS
        hp->thread_loh_segment (res);
#else
        thread_loh_segment (res);
#endif //MULTIPLE_HEAPS
    }

    return res;
}

void gc_heap::thread_loh_segment (heap_segment* new_seg)
{
    heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));

    while (heap_segment_next_rw (seg))
        seg = heap_segment_next_rw (seg);
    heap_segment_next (seg) = new_seg;
}

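// Acquires a segment for the LOH. get_segment needs to run under the GC lock, so we release
// more_space_lock_loh first and re-acquire it before returning; did_full_compact_gc reports
// whether a full compacting GC happened while we were waiting, so the caller can re-evaluate.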
heap_segment*
gc_heap::get_large_segment (size_t size, BOOL* did_full_compact_gc)
{
    *did_full_compact_gc = FALSE;
    size_t last_full_compact_gc_count = get_full_compact_gc_count();

    //access to get_segment needs to be serialized
    add_saved_spinlock_info (true, me_release, mt_get_large_seg);
    leave_spin_lock (&more_space_lock_loh);
    enter_spin_lock (&gc_heap::gc_lock);
    dprintf (SPINLOCK_LOG, ("[%d]Seg: Egc", heap_number));
    // if a GC happened between here and before we ask for a segment in
    // get_large_segment, we need to count that GC.
    size_t current_full_compact_gc_count = get_full_compact_gc_count();

    if (current_full_compact_gc_count > last_full_compact_gc_count)
    {
        *did_full_compact_gc = TRUE;
    }

    heap_segment* res = get_segment_for_loh (size
#ifdef MULTIPLE_HEAPS
                                            , this
#endif //MULTIPLE_HEAPS
                                            );

    dprintf (SPINLOCK_LOG, ("[%d]Seg: A Lgc", heap_number));
    leave_spin_lock (&gc_heap::gc_lock);
    enter_spin_lock (&more_space_lock_loh);
    add_saved_spinlock_info (true, me_acquire, mt_get_large_seg);

    return res;
}

#if 0
BOOL gc_heap::unprotect_segment (heap_segment* seg)
{
    uint8_t* start = align_lower_page (heap_segment_mem (seg));
    ptrdiff_t region_size = heap_segment_allocated (seg) - start;

    if (region_size != 0 )
    {
        dprintf (3, ("unprotecting segment %Ix:", (size_t)seg));

        BOOL status = GCToOSInterface::VirtualUnprotect (start, region_size);
        assert (status);
        return status;
    }
    return FALSE;
}
#endif

#ifdef MULTIPLE_HEAPS
#ifdef _X86_
#ifdef _MSC_VER
#pragma warning(disable:4035)
    static ptrdiff_t  get_cycle_count()
    {
        __asm   rdtsc
    }
#pragma warning(default:4035)
#elif defined(__GNUC__)
    static ptrdiff_t  get_cycle_count()
    {
        ptrdiff_t cycles;
        ptrdiff_t cyclesHi;
        __asm__ __volatile__
        ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
        return cycles;
    }
#else //_MSC_VER
#error Unknown compiler
#endif //_MSC_VER
#elif defined(_TARGET_AMD64_)
#ifdef _MSC_VER
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
    static ptrdiff_t get_cycle_count()
    {
        return (ptrdiff_t)__rdtsc();
    }
#elif defined(__GNUC__)
    static ptrdiff_t get_cycle_count()
    {
        ptrdiff_t cycles;
        ptrdiff_t cyclesHi;
        __asm__ __volatile__
        ("rdtsc":"=a" (cycles), "=d" (cyclesHi));
        return (cyclesHi << 32) | cycles;
    }
#else // _MSC_VER
    extern "C" ptrdiff_t get_cycle_count(void);
#endif // _MSC_VER
#else
    static ptrdiff_t get_cycle_count()
    {
        // @ARMTODO, @ARM64TODO, @WASMTODO: cycle counter is not exposed to user mode. For now (until we can show this
        // makes a difference on the configurations on which we'll run) just return 0. This will result in
        // all buffer access times being reported as equal in access_time().
        return 0;
    }
#endif //_X86_

// We may not be on contiguous numa nodes so need to store
// the node index as well.
struct node_heap_count
{
    int node_no;
    int heap_count;
};

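// heap_select decides which heap a thread should allocate into. When the OS can report the
// current processor number we simply map proc -> heap (and proc -> NUMA node). Otherwise we
// fall back to the sniff-buffer heuristic: each heap touches its own set of cache lines
// (mark_heap), and select_heap times a read of every heap's line from the allocating thread,
// picking the heap whose line is clearly fastest to access - the assumption being that a fast
// access means that heap's line is already close by in this processor's cache hierarchy.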
class heap_select
{
    heap_select() {}
public:
    static uint8_t* sniff_buffer;
    static unsigned n_sniff_buffers;
    static unsigned cur_sniff_index;

    static uint16_t proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
    static uint16_t heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
    static uint16_t heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
    static uint16_t proc_no_to_numa_node[MAX_SUPPORTED_CPUS];
    static uint16_t numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
    // Note this is the total number of NUMA nodes the GC heaps are on. There might be
    // more on the machine if the GC threads aren't using all of them.
    static uint16_t total_numa_nodes;
    static node_heap_count heaps_on_node[MAX_SUPPORTED_NODES];

    static int access_time(uint8_t *sniff_buffer, int heap_number, unsigned sniff_index, unsigned n_sniff_buffers)
    {
        ptrdiff_t start_cycles = get_cycle_count();
        uint8_t sniff = sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE];
        assert (sniff == 0);
        ptrdiff_t elapsed_cycles = get_cycle_count() - start_cycles;
        // add sniff here just to defeat the optimizer
        elapsed_cycles += sniff;
        return (int) elapsed_cycles;
    }

public:
    static BOOL init(int n_heaps)
    {
        assert (sniff_buffer == NULL && n_sniff_buffers == 0);
        if (!GCToOSInterface::CanGetCurrentProcessorNumber())
        {
            n_sniff_buffers = n_heaps*2+1;
            size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
            size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
            if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overflow
            {
                return FALSE;
            }

            sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
            if (sniff_buffer == 0)
                return FALSE;
            memset(sniff_buffer, 0, sniff_buf_size*sizeof(uint8_t));
        }

        // If we cannot enable GC NUMA awareness, force all heaps to be on
        // one NUMA node by filling the array with all 0s.
        if (!GCToOSInterface::CanEnableGCNumaAware())
            memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));

        return TRUE;
    }

    static void init_cpu_mapping(int heap_number)
    {
        if (GCToOSInterface::CanGetCurrentProcessorNumber())
        {
            uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber();
            proc_no_to_heap_no[proc_no] = (uint16_t)heap_number;
        }
    }

    static void mark_heap(int heap_number)
    {
        if (GCToOSInterface::CanGetCurrentProcessorNumber())
            return;

        for (unsigned sniff_index = 0; sniff_index < n_sniff_buffers; sniff_index++)
            sniff_buffer[(1 + heap_number*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;
    }

    static int select_heap(alloc_context* acontext)
    {
        UNREFERENCED_PARAMETER(acontext); // only referenced by dprintf

        if (GCToOSInterface::CanGetCurrentProcessorNumber())
        {
            uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber();
            return proc_no_to_heap_no[proc_no];
        }

        unsigned sniff_index = Interlocked::Increment(&cur_sniff_index);
        sniff_index %= n_sniff_buffers;

        int best_heap = 0;
        int best_access_time = 1000*1000*1000;
        int second_best_access_time = best_access_time;

        uint8_t *l_sniff_buffer = sniff_buffer;
        unsigned l_n_sniff_buffers = n_sniff_buffers;
        for (int heap_number = 0; heap_number < gc_heap::n_heaps; heap_number++)
        {
            int this_access_time = access_time(l_sniff_buffer, heap_number, sniff_index, l_n_sniff_buffers);
            if (this_access_time < best_access_time)
            {
                second_best_access_time = best_access_time;
                best_access_time = this_access_time;
                best_heap = heap_number;
            }
            else if (this_access_time < second_best_access_time)
            {
                second_best_access_time = this_access_time;
            }
        }

        if (best_access_time*2 < second_best_access_time)
        {
            sniff_buffer[(1 + best_heap*n_sniff_buffers + sniff_index)*HS_CACHE_LINE_SIZE] &= 1;

            dprintf (3, ("select_heap yields crisp %d for context %p\n", best_heap, (void *)acontext));
        }
        else
        {
            dprintf (3, ("select_heap yields vague %d for context %p\n", best_heap, (void *)acontext ));
        }

        return best_heap;
    }

    static bool can_find_heap_fast()
    {
        return GCToOSInterface::CanGetCurrentProcessorNumber();
    }

    static uint16_t find_heap_no_from_proc_no(uint16_t proc_no)
    {
        return proc_no_to_heap_no[proc_no];
    }

    static uint16_t find_proc_no_from_heap_no(int heap_number)
    {
        return heap_no_to_proc_no[heap_number];
    }

    static void set_proc_no_for_heap(int heap_number, uint16_t proc_no)
    {
        heap_no_to_proc_no[heap_number] = proc_no;
    }

    static uint16_t find_numa_node_from_heap_no(int heap_number)
    {
        return heap_no_to_numa_node[heap_number];
    }

    static uint16_t find_numa_node_from_proc_no (uint16_t proc_no)
    {
        return proc_no_to_numa_node[proc_no];
    }

    static void set_numa_node_for_heap_and_proc(int heap_number, uint16_t proc_no, uint16_t numa_node)
    {
        heap_no_to_numa_node[heap_number] = numa_node;
        proc_no_to_numa_node[proc_no] = numa_node;
    }

    static void init_numa_node_to_heap_map(int nheaps)
    {
        // Called right after GCHeap::Init() for each heap.
        // For each NUMA node used by the heaps,
        // numa_node_to_heap_map[numa_node] is set to the first heap number on that node and
        // numa_node_to_heap_map[numa_node + 1] is set to the first heap number not on that node.
        // Set the start of the heap number range for the first NUMA node.
        numa_node_to_heap_map[heap_no_to_numa_node[0]] = 0;
        total_numa_nodes = 0;
        memset (heaps_on_node, 0, sizeof (heaps_on_node));
        heaps_on_node[0].node_no = heap_no_to_numa_node[0];
        heaps_on_node[0].heap_count = 1;

        for (int i=1; i < nheaps; i++)
        {
            if (heap_no_to_numa_node[i] != heap_no_to_numa_node[i-1])
            {
                total_numa_nodes++;
                heaps_on_node[total_numa_nodes].node_no = heap_no_to_numa_node[i];

                // Set the end of the heap number range for the previous NUMA node
                numa_node_to_heap_map[heap_no_to_numa_node[i-1] + 1] =
                // Set the start of the heap number range for the current NUMA node
                numa_node_to_heap_map[heap_no_to_numa_node[i]] = (uint16_t)i;
            }
            (heaps_on_node[total_numa_nodes].heap_count)++;
        }

        // Set the end of the heap range for the last NUMA node
        numa_node_to_heap_map[heap_no_to_numa_node[nheaps-1] + 1] = (uint16_t)nheaps; //mark the end with nheaps
        total_numa_nodes++;
    }

    // TODO: currently this doesn't work with GCHeapAffinitizeMask/GCHeapAffinitizeRanges
    // because the heaps may not be on contiguous active procs.
    //
    // This is for scenarios where GCHeapCount is specified as something like
    // (g_num_active_processors - 2) to allow less randomization to the Server GC threads.
    // In this case we want to assign the right heaps to those procs, i.e., if they share
    // the same NUMA node we want to assign local heaps to those procs. Otherwise we
    // let the heap balancing mechanism take over for now.
    static void distribute_other_procs()
    {
        if (affinity_config_specified_p)
            return;

        uint16_t proc_no = 0;
        uint16_t node_no = 0;
        int current_node_no = -1;
        int current_heap_on_node = -1;

        for (int i = gc_heap::n_heaps; i < (int)g_num_active_processors; i++)
        {
            if (!GCToOSInterface::GetProcessorForHeap (i, &proc_no, &node_no))
                break;

            int start_heap = (int)numa_node_to_heap_map[node_no];
            int end_heap = (int)(numa_node_to_heap_map[node_no + 1]);

            if ((end_heap - start_heap) > 0)
            {
                if (node_no == current_node_no)
                {
                    // We already iterated through all heaps on this node, don't add more procs to these
                    // heaps.
                    if (current_heap_on_node >= end_heap)
                    {
                        continue;
                    }
                }
                else
                {
                    current_node_no = node_no;
                    current_heap_on_node = start_heap;
                }

                proc_no_to_heap_no[proc_no] = current_heap_on_node;
                proc_no_to_numa_node[proc_no] = node_no;

                current_heap_on_node++;
            }
        }
    }

    static void get_heap_range_for_heap(int hn, int* start, int* end)
    {
        uint16_t numa_node = heap_no_to_numa_node[hn];
        *start = (int)numa_node_to_heap_map[numa_node];
        *end   = (int)(numa_node_to_heap_map[numa_node+1]);
    }

    // This gets the next valid numa node index starting at current_index+1.
    // It assumes that current_index is a valid node index.
    // If current_index+1 is at the end this will start at the beginning. So this will
    // always return a valid node index, along with that node's start/end heaps.
    static uint16_t get_next_numa_node (uint16_t current_index, int* start, int* end)
    {
        int start_index = current_index + 1;
        int nheaps = gc_heap::n_heaps;

        bool found_node_with_heaps_p = false;
        do
        {
            int start_heap = (int)numa_node_to_heap_map[start_index];
            int end_heap = (int)numa_node_to_heap_map[start_index + 1];
            if (start_heap == nheaps)
            {
                // This is the last node.
                start_index = 0;
                continue;
            }

            if ((end_heap - start_heap) == 0)
            {
                // This node has no heaps.
                start_index++;
            }
            else
            {
                found_node_with_heaps_p = true;
                *start = start_heap;
                *end = end_heap;
            }
        } while (!found_node_with_heaps_p);

        return start_index;
    }
};
uint8_t* heap_select::sniff_buffer;
unsigned heap_select::n_sniff_buffers;
unsigned heap_select::cur_sniff_index;
uint16_t heap_select::proc_no_to_heap_no[MAX_SUPPORTED_CPUS];
uint16_t heap_select::heap_no_to_proc_no[MAX_SUPPORTED_CPUS];
uint16_t heap_select::heap_no_to_numa_node[MAX_SUPPORTED_CPUS];
uint16_t heap_select::proc_no_to_numa_node[MAX_SUPPORTED_CPUS];
uint16_t heap_select::numa_node_to_heap_map[MAX_SUPPORTED_CPUS+4];
uint16_t  heap_select::total_numa_nodes;
node_heap_count heap_select::heaps_on_node[MAX_SUPPORTED_NODES];

#ifdef HEAP_BALANCE_INSTRUMENTATION
// This records info we use to look at the effect of different strategies
// for heap balancing.
struct heap_balance_info
{
    uint64_t timestamp;
    // This also encodes whether we detected the thread running on a
    // different proc during a balance attempt. Sometimes this happens
    // multiple times during one attempt! If it does, we just record the
    // last proc we observed and set the MSB.
    int tid;
    // This records the final alloc_heap for the thread.
    //
    // This also encodes the reason why we needed to set_home_heap
    // in balance_heaps.
    // If we set it because the home heap is not the same as the proc,
    // we set the MSB.
    //
    // If we set the ideal proc, we set the 2nd MSB.
    int alloc_heap;
    int ideal_proc_no;
};

// This means that between GCs we can log at most this many entries per proc.
// This is usually enough. Most of the time we only need to log something every 128k
// of allocations in balance_heaps, and the gen0 budget is <= 200MB.
#define default_max_hb_heap_balance_info 4096

struct heap_balance_info_proc
{
    int count;
    int index;
    heap_balance_info hb_info[default_max_hb_heap_balance_info];
};

struct heap_balance_info_numa
{
    heap_balance_info_proc* hb_info_procs;
};

uint64_t start_raw_ts = 0;
bool cpu_group_enabled_p = false;
uint32_t procs_per_numa_node = 0;
uint16_t total_numa_nodes_on_machine = 0;
uint32_t procs_per_cpu_group = 0;
uint16_t total_cpu_groups_on_machine = 0;
// Note this is still on one of the numa nodes, so we'll incur a remote access
// no matter what.
heap_balance_info_numa* hb_info_numa_nodes = NULL;

// TODO: This doesn't work for multiple nodes per CPU group yet.
int get_proc_index_numa (int proc_no, int* numa_no)
{
    if (total_numa_nodes_on_machine == 1)
    {
        *numa_no = 0;
        return proc_no;
    }
    else
    {
        if (cpu_group_enabled_p)
        {
            // see vm\gcenv.os.cpp GroupProcNo implementation.
            *numa_no = proc_no >> 6;
            return (proc_no % 64);
        }
        else
        {
            *numa_no = proc_no / procs_per_numa_node;
            return (proc_no % procs_per_numa_node);
        }
    }
}

// We could consider optimizing this so we don't need to get the tid
// every time, but it's not very expensive to get.
void add_to_hb_numa (
    int proc_no,
    int ideal_proc_no,
    int alloc_heap,
    bool multiple_procs_p,
    bool alloc_count_p,
    bool set_ideal_p)
{
    int tid = (int)GCToOSInterface::GetCurrentThreadIdForLogging ();
    uint64_t timestamp = RawGetHighPrecisionTimeStamp ();

    int saved_proc_no = proc_no;
    int numa_no = -1;
    proc_no = get_proc_index_numa (proc_no, &numa_no);

    heap_balance_info_numa* hb_info_numa_node = &hb_info_numa_nodes[numa_no];

    heap_balance_info_proc* hb_info_proc = &(hb_info_numa_node->hb_info_procs[proc_no]);
    int index = hb_info_proc->index;
    int count = hb_info_proc->count;

    if (index == count)
    {
        // Too much info logged between GCs. This can happen if the thread is scheduled on a different
        // processor very often, causing us to log many entries. You could
        // increase default_max_hb_heap_balance_info, but this usually indicates a problem that
        // should be investigated.
        dprintf (HEAP_BALANCE_LOG, ("too much info between GCs, already logged %d entries", index));
        GCToOSInterface::DebugBreak ();
    }
    heap_balance_info* hb_info = &(hb_info_proc->hb_info[index]);

    dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d->%3d(i:%3d), N%d] #%4d: %I64d, tid %d, ah: %d, m: %d, p: %d, i: %d",
        saved_proc_no, proc_no, ideal_proc_no, numa_no, index,
        (timestamp - start_raw_ts), tid, alloc_heap, (int)multiple_procs_p, (int)(!alloc_count_p), (int)set_ideal_p));

    if (multiple_procs_p)
    {
        tid |= (1 << (sizeof (tid) * 8 - 1));
    }

    if (!alloc_count_p)
    {
        alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 1));
    }

    if (set_ideal_p)
    {
        alloc_heap |= (1 << (sizeof (alloc_heap) * 8 - 2));
    }

    hb_info->timestamp = timestamp;
    hb_info->tid = tid;
    hb_info->alloc_heap = alloc_heap;
    hb_info->ideal_proc_no = ideal_proc_no;
    (hb_info_proc->index)++;
}

const int hb_log_buffer_size = 1024;
static char hb_log_buffer[hb_log_buffer_size];
int last_hb_recorded_gc_index = -1;
#endif //HEAP_BALANCE_INSTRUMENTATION

// This logs what we recorded in balance_heaps.
// The format for this is
//
// [ms since last GC end]
// [cpu index]
// all elements we stored before this GC for this CPU in the format
// timestamp, tid, ideal_proc_no, alloc_heap_no (plus optional |m/|p/|i flags)
// repeat this for each CPU
//
// the timestamp here is just the result of calling QPC,
// it's not converted to ms. The conversion will be done when we process
// the log.
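//
// For example, a single logged element might look like (hypothetical values):
//   1234567,42,3,5|m
// i.e. raw timestamp 1234567 (ticks since start_raw_ts), tid 42, ideal proc 3,
// alloc heap 5, and the thread was observed on multiple procs ("|m").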
void gc_heap::hb_log_balance_activities()
{
#ifdef HEAP_BALANCE_INSTRUMENTATION
    char* log_buffer = hb_log_buffer;

    size_t now = GetHighPrecisionTimeStamp ();
    size_t time_since_last_gc_ms = now - last_gc_end_time_ms;
    dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP%Id - %Id = %Id", now, last_gc_end_time_ms, time_since_last_gc_ms));

    // We want to get the min and the max timestamp for all procs because it helps with our post processing
    // to know how big an array to allocate to display the history in between the GCs.
    uint64_t min_timestamp = 0xffffffffffffffff;
    uint64_t max_timestamp = 0;

    for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++)
    {
        heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs;
        for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++)
        {
            heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index];
            int total_entries_on_proc = hb_info_proc->index;

            if (total_entries_on_proc > 0)
            {
                min_timestamp = min (min_timestamp, hb_info_proc->hb_info[0].timestamp);
                max_timestamp = max (max_timestamp, hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp);
            }
        }
    }

    dprintf (HEAP_BALANCE_LOG, ("[GCA#%Id %Id-%I64d-%I64d]",
        settings.gc_index, time_since_last_gc_ms, (min_timestamp - start_raw_ts), (max_timestamp - start_raw_ts)));

    if (last_hb_recorded_gc_index == (int)settings.gc_index)
    {
        GCToOSInterface::DebugBreak ();
    }

    last_hb_recorded_gc_index = (int)settings.gc_index;

    // When we print out the proc index we need to convert it to the actual proc index (which is contiguous).
    // It helps with post processing.
    for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++)
    {
        heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs;
        for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++)
        {
            heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index];
            int total_entries_on_proc = hb_info_proc->index;
            if (total_entries_on_proc > 0)
            {
                int total_exec_time_ms = (int)((hb_info_proc->hb_info[total_entries_on_proc - 1].timestamp - hb_info_proc->hb_info[0].timestamp) / (qpf / 1000));
                dprintf (HEAP_BALANCE_LOG, ("[p%d]-%d-%dms", (proc_index + numa_node_index * procs_per_numa_node), total_entries_on_proc, total_exec_time_ms));
            }

            for (int i = 0; i < hb_info_proc->index; i++)
            {
                heap_balance_info* hb_info = &hb_info_proc->hb_info[i];
                bool multiple_procs_p = false;
                bool alloc_count_p = true;
                bool set_ideal_p = false;
                int tid = hb_info->tid;
                int alloc_heap = hb_info->alloc_heap;

                if (tid & (1 << (sizeof (tid) * 8 - 1)))
                {
                    multiple_procs_p = true;
                    tid &= ~(1 << (sizeof (tid) * 8 - 1));
                }

                if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 1)))
                {
                    alloc_count_p = false;
                    alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 1));
                }

                if (alloc_heap & (1 << (sizeof (alloc_heap) * 8 - 2)))
                {
                    set_ideal_p = true;
                    alloc_heap &= ~(1 << (sizeof (alloc_heap) * 8 - 2));
                }

                // TODO - This assumes the ideal proc is in the same CPU group, which is not true
                // when we don't have CPU groups.
                int ideal_proc_no = hb_info->ideal_proc_no;
                int ideal_node_no = -1;
                ideal_proc_no = get_proc_index_numa (ideal_proc_no, &ideal_node_no);
                ideal_proc_no = ideal_proc_no + ideal_node_no * procs_per_numa_node;

                dprintf (HEAP_BALANCE_LOG, ("%I64d,%d,%d,%d%s%s%s",
                    (hb_info->timestamp - start_raw_ts),
                    tid,
                    ideal_proc_no,
                    (int)alloc_heap,
                    (multiple_procs_p ? "|m" : ""), (!alloc_count_p ? "|p" : ""), (set_ideal_p ? "|i" : "")));
            }
        }
    }

    for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++)
    {
        heap_balance_info_proc* hb_info_procs = hb_info_numa_nodes[numa_node_index].hb_info_procs;
        for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++)
        {
            heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index];
            hb_info_proc->index = 0;
        }
    }
#endif //HEAP_BALANCE_INSTRUMENTATION
}

// The format for this is
//
// [GC_alloc_mb]
// h0_new_alloc, h1_new_alloc, ...
//
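// For example (hypothetical numbers), a per-node line comes out as
//   [N#200]12,7,9,11,
// where 200 is the gen0 budget in mb (printed instead of the node index, see
// the comment in the loop below) followed by the mb allocated on each heap.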
void gc_heap::hb_log_new_allocation()
{
#ifdef HEAP_BALANCE_INSTRUMENTATION
    char* log_buffer = hb_log_buffer;

    int desired_alloc_mb = (int)(dd_desired_allocation (g_heaps[0]->dynamic_data_of (0)) / 1024 / 1024);

    int buffer_pos = sprintf_s (hb_log_buffer, hb_log_buffer_size, "[GC_alloc_mb]\n");
    for (int numa_node_index = 0; numa_node_index < heap_select::total_numa_nodes; numa_node_index++)
    {
        int node_allocated_mb = 0;

        // I'm printing out the budget here instead of the numa node index so we know how much
        // of the budget we consumed.
        buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "[N#%3d]",
            //numa_node_index);
            desired_alloc_mb);

        int heaps_on_node = heap_select::heaps_on_node[numa_node_index].heap_count;

        for (int heap_index = 0; heap_index < heaps_on_node; heap_index++)
        {
            int actual_heap_index = heap_index + numa_node_index * heaps_on_node;
            gc_heap* hp = g_heaps[actual_heap_index];
            dynamic_data* dd0 = hp->dynamic_data_of (0);
            int allocated_mb = (int)((dd_desired_allocation (dd0) - dd_new_allocation (dd0)) / 1024 / 1024);
            node_allocated_mb += allocated_mb;
            buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "%d,",
                allocated_mb);
        }

        dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPN#%d a %dmb(%dmb)", numa_node_index, node_allocated_mb, desired_alloc_mb));

        buffer_pos += sprintf_s (hb_log_buffer + buffer_pos, hb_log_buffer_size - buffer_pos, "\n");
    }

    dprintf (HEAP_BALANCE_LOG, ("%s", hb_log_buffer));
#endif //HEAP_BALANCE_INSTRUMENTATION
}

BOOL gc_heap::create_thread_support (int number_of_heaps)
{
    BOOL ret = FALSE;
    if (!gc_start_event.CreateOSManualEventNoThrow (FALSE))
    {
        goto cleanup;
    }
    if (!ee_suspend_event.CreateOSAutoEventNoThrow (FALSE))
    {
        goto cleanup;
    }
    if (!gc_t_join.init (number_of_heaps, join_flavor_server_gc))
    {
        goto cleanup;
    }

    ret = TRUE;

cleanup:

    if (!ret)
    {
        destroy_thread_support();
    }

    return ret;
}

void gc_heap::destroy_thread_support ()
{
    if (ee_suspend_event.IsValid())
    {
        ee_suspend_event.CloseEvent();
    }
    if (gc_start_event.IsValid())
    {
        gc_start_event.CloseEvent();
    }
}

bool get_proc_and_numa_for_heap (int heap_number)
{
    uint16_t proc_no;
    uint16_t node_no;

    bool res = GCToOSInterface::GetProcessorForHeap (heap_number, &proc_no, &node_no);
    if (res)
    {
        heap_select::set_proc_no_for_heap (heap_number, proc_no);
        if (node_no != NUMA_NODE_UNDEFINED)
        {
            heap_select::set_numa_node_for_heap_and_proc (heap_number, proc_no, node_no);
        }
    }

    return res;
}

void set_thread_affinity_for_heap (int heap_number, uint16_t proc_no)
{
    if (!GCToOSInterface::SetThreadAffinity (proc_no))
    {
        dprintf (1, ("Failed to set thread affinity for GC thread %d on proc #%d", heap_number, proc_no));
    }
}

bool gc_heap::create_gc_thread ()
{
    dprintf (3, ("Creating gc thread\n"));
    return GCToEEInterface::CreateThread(gc_thread_stub, this, false, ".NET Server GC");
}

#ifdef _MSC_VER
#pragma warning(disable:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
#endif //_MSC_VER
void gc_heap::gc_thread_function ()
{
    assert (gc_done_event.IsValid());
    assert (gc_start_event.IsValid());
    dprintf (3, ("gc thread started"));

    heap_select::init_cpu_mapping(heap_number);

    while (1)
    {
        assert (!gc_t_join.joined());

        if (heap_number == 0)
        {
            gc_heap::ee_suspend_event.Wait(INFINITE, FALSE);

            BEGIN_TIMING(suspend_ee_during_log);
            GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
            END_TIMING(suspend_ee_during_log);

            proceed_with_gc_p = TRUE;

            if (!should_proceed_with_gc())
            {
                update_collection_counts_for_no_gc();
                proceed_with_gc_p = FALSE;
            }
            else
            {
                settings.init_mechanisms();
                gc_start_event.Set();
            }
            dprintf (3, ("%d gc thread waiting...", heap_number));
        }
        else
        {
            gc_start_event.Wait(INFINITE, FALSE);
            dprintf (3, ("%d gc thread waiting... Done", heap_number));
        }

        assert ((heap_number == 0) || proceed_with_gc_p);

        if (proceed_with_gc_p)
        {
            garbage_collect (GCHeap::GcCondemnedGeneration);

            if (pm_trigger_full_gc)
            {
                garbage_collect_pm_full_gc();
            }
        }

        if (heap_number == 0)
        {
            if (proceed_with_gc_p && (!settings.concurrent))
            {
                do_post_gc();
            }

#ifdef BACKGROUND_GC
            recover_bgc_settings();
#endif //BACKGROUND_GC

#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < gc_heap::n_heaps; i++)
            {
                gc_heap* hp = gc_heap::g_heaps[i];
                hp->add_saved_spinlock_info (false, me_release, mt_block_gc);
                leave_spin_lock(&hp->more_space_lock_soh);
            }
#endif //MULTIPLE_HEAPS

            gc_heap::gc_started = FALSE;

            BEGIN_TIMING(restart_ee_during_log);
            GCToEEInterface::RestartEE(TRUE);
            END_TIMING(restart_ee_during_log);
            process_sync_log_stats();

            dprintf (SPINLOCK_LOG, ("GC Lgc"));
            leave_spin_lock (&gc_heap::gc_lock);

            gc_heap::internal_gc_done = true;

            if (proceed_with_gc_p)
                set_gc_done();
            else
            {
                // If we didn't actually do a GC, it means we didn't wake up the other threads;
                // we still need to set the gc_done_event for those threads.
                for (int i = 0; i < gc_heap::n_heaps; i++)
                {
                    gc_heap* hp = gc_heap::g_heaps[i];
                    hp->set_gc_done();
                }
            }
        }
        else
        {
            int spin_count = 32 * (gc_heap::n_heaps - 1);

            // wait until RestartEE has progressed to a stage where we can restart user threads
            while (!gc_heap::internal_gc_done && !GCHeap::SafeToRestartManagedThreads())
            {
                spin_and_switch (spin_count, (gc_heap::internal_gc_done || GCHeap::SafeToRestartManagedThreads()));
            }
            set_gc_done();
        }
    }
}
#ifdef _MSC_VER
#pragma warning(default:4715) //IA64 xcompiler recognizes that without the 'break;' the while(1) will never end and therefore not return a value for that code path
#endif //_MSC_VER

#endif //MULTIPLE_HEAPS

bool gc_heap::virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number)
{
#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
    // Currently there is no way for us to specify the NUMA node to allocate on via the hosting
    // interfaces to a host. This will need to be added later.
#if !defined(FEATURE_CORECLR) && !defined(BUILD_AS_STANDALONE)
    if (!CLRMemoryHosted())
#endif
    {
        if (GCToOSInterface::CanEnableGCNumaAware())
        {
            uint16_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
            if (GCToOSInterface::VirtualCommit (addr, size, numa_node))
                return true;
        }
    }
#else
    UNREFERENCED_PARAMETER(h_number);
#endif

    // NUMA awareness not enabled, or the call failed -- fall back to VirtualCommit()
    return GCToOSInterface::VirtualCommit(addr, size);
}

bool gc_heap::virtual_commit (void* address, size_t size, int h_number, bool* hard_limit_exceeded_p)
{
#ifndef BIT64
    assert (heap_hard_limit == 0);
#endif //!BIT64

    if (heap_hard_limit)
    {
        bool exceeded_p = false;

        check_commit_cs.Enter();

        if ((current_total_committed + size) > heap_hard_limit)
        {
            dprintf (1, ("%Id + %Id = %Id > limit",
                current_total_committed, size,
                (current_total_committed + size),
                heap_hard_limit));

            exceeded_p = true;
        }
        else
        {
            current_total_committed += size;
            if (h_number < 0)
                current_total_committed_bookkeeping += size;
        }

        check_commit_cs.Leave();

        if (hard_limit_exceeded_p)
            *hard_limit_exceeded_p = exceeded_p;

        if (exceeded_p)
        {
            dprintf (1, ("can't commit %Ix for %Id bytes > HARD LIMIT %Id", (size_t)address, size, heap_hard_limit));
            return false;
        }
    }

    // If it's a valid heap number, it means we're committing memory for the GC heap.
    // In addition, if large pages are enabled, we set commit_succeeded_p to true because the memory is already committed.
    bool commit_succeeded_p = ((h_number >= 0) ? (use_large_pages_p ? true :
                              virtual_alloc_commit_for_heap (address, size, h_number)) :
                              GCToOSInterface::VirtualCommit(address, size));

    if (!commit_succeeded_p && heap_hard_limit)
    {
        check_commit_cs.Enter();
        dprintf (1, ("commit failed, updating %Id to %Id",
                current_total_committed, (current_total_committed - size)));
        current_total_committed -= size;
        if (h_number < 0)
            current_total_committed_bookkeeping -= size;

        check_commit_cs.Leave();
    }

    return commit_succeeded_p;
}

bool gc_heap::virtual_decommit (void* address, size_t size, int h_number)
{
#ifndef BIT64
    assert (heap_hard_limit == 0);
#endif //!BIT64

    bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size);

    if (decommit_succeeded_p && heap_hard_limit)
    {
        check_commit_cs.Enter();
        current_total_committed -= size;
        if (h_number < 0)
            current_total_committed_bookkeeping -= size;
        check_commit_cs.Leave();
    }

    return decommit_succeeded_p;
}

#ifndef SEG_MAPPING_TABLE
inline
heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p)
{
    uint8_t* sadd = add;
    heap_segment* hs = 0;
    heap_segment* hs1 = 0;
    if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address)))
    {
        delta = 0;
        return 0;
    }
    //repeat in case there is a concurrent insertion in the table.
    do
    {
        hs = hs1;
        sadd = add;
        seg_table->lookup (sadd);
        hs1 = (heap_segment*)sadd;
    } while (hs1 && !in_range_for_segment (add, hs1) && (hs != hs1));

    hs = hs1;

    if ((hs == 0) ||
        (verify_p && (add > heap_segment_reserved ((heap_segment*)(sadd + delta)))))
        delta = 0;
    return hs;
}
#endif //SEG_MAPPING_TABLE

class mark
{
public:
    uint8_t* first;
    size_t len;

    // If we want to save space we can have a pool of plug_and_gap's instead of
    // always having 2 allocated for each pinned plug.
    gap_reloc_pair saved_pre_plug;
    // If we decide to not compact, we need to restore the original values.
    gap_reloc_pair saved_pre_plug_reloc;

    gap_reloc_pair saved_post_plug;

    // Supposedly pinned objects cannot have references, but we are seeing some from pinvoke
    // frames. Also, if it's an artificially pinned plug created by us, it can certainly
    // have references.
    // We know these cases will be rare, so we can optimize this to be allocated only on demand.
    gap_reloc_pair saved_post_plug_reloc;

    // We need to calculate this after we are done with plan phase and before compact
    // phase because compact phase will change the bricks so relocate_address will no
    // longer work.
    uint8_t* saved_pre_plug_info_reloc_start;

    // We need to save this because we will have no way to calculate it, unlike the
    // pre plug info start which is right before this plug.
    uint8_t* saved_post_plug_info_start;

#ifdef SHORT_PLUGS
    uint8_t* allocation_context_start_region;
#endif //SHORT_PLUGS

    // How the bits in these bytes are organized, from MSB to LSB:
    // bit to indicate whether it's a short obj |
    // 3 bits for refs in this short obj |
    // 2 unused bits |
    // bit to indicate if it's collectible |
    // last bit, which indicates if there's pre or post info associated with this plug.
    // If the last bit is not set, all other bits will be 0.
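    //
    // For illustration (assuming a 32-bit BOOL and that gap_reloc_pair spans
    // three pointer-sized slots, as the comment above recover_plug_info notes):
    // set_pre_short() sets bit 31, and set_pre_short_bit (0) sets bit
    // get_pre_short_start_bit() + 0 == 32 - 1 - 3 == 28, i.e. the ref bit for
    // the first pointer-sized slot of the short pre plug.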
    BOOL saved_pre_p;
    BOOL saved_post_p;

#ifdef _DEBUG
    // We are seeing this getting corrupted for a pinned plug (PP) with a non-pinned plug (NP) after it.
    // Save it when we first set it and make sure it doesn't change.
    gap_reloc_pair saved_post_plug_debug;
#endif //_DEBUG

    size_t get_max_short_bits()
    {
        return (sizeof (gap_reloc_pair) / sizeof (uint8_t*));
    }

    // pre bits
    size_t get_pre_short_start_bit ()
    {
        return (sizeof (saved_pre_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
    }

    BOOL pre_short_p()
    {
        return (saved_pre_p & (1 << (sizeof (saved_pre_p) * 8 - 1)));
    }

    void set_pre_short()
    {
        saved_pre_p |= (1 << (sizeof (saved_pre_p) * 8 - 1));
    }

    void set_pre_short_bit (size_t bit)
    {
        saved_pre_p |= 1 << (get_pre_short_start_bit() + bit);
    }

    BOOL pre_short_bit_p (size_t bit)
    {
        return (saved_pre_p & (1 << (get_pre_short_start_bit() + bit)));
    }

#ifdef COLLECTIBLE_CLASS
    void set_pre_short_collectible()
    {
        saved_pre_p |= 2;
    }

    BOOL pre_short_collectible_p()
    {
        return (saved_pre_p & 2);
    }
#endif //COLLECTIBLE_CLASS

    // post bits
    size_t get_post_short_start_bit ()
    {
        return (sizeof (saved_post_p) * 8 - 1 - (sizeof (gap_reloc_pair) / sizeof (uint8_t*)));
    }

    BOOL post_short_p()
    {
        return (saved_post_p & (1 << (sizeof (saved_post_p) * 8 - 1)));
    }

    void set_post_short()
    {
        saved_post_p |= (1 << (sizeof (saved_post_p) * 8 - 1));
    }

    void set_post_short_bit (size_t bit)
    {
        saved_post_p |= 1 << (get_post_short_start_bit() + bit);
    }

    BOOL post_short_bit_p (size_t bit)
    {
        return (saved_post_p & (1 << (get_post_short_start_bit() + bit)));
    }

#ifdef COLLECTIBLE_CLASS
    void set_post_short_collectible()
    {
        saved_post_p |= 2;
    }

    BOOL post_short_collectible_p()
    {
        return (saved_post_p & 2);
    }
#endif //COLLECTIBLE_CLASS

    uint8_t* get_plug_address() { return first; }

    BOOL has_pre_plug_info() { return saved_pre_p; }
    BOOL has_post_plug_info() { return saved_post_p; }

    gap_reloc_pair* get_pre_plug_reloc_info() { return &saved_pre_plug_reloc; }
    gap_reloc_pair* get_post_plug_reloc_info() { return &saved_post_plug_reloc; }
    void set_pre_plug_info_reloc_start (uint8_t* reloc) { saved_pre_plug_info_reloc_start = reloc; }
    uint8_t* get_post_plug_info_start() { return saved_post_plug_info_start; }

    // We need to temporarily recover the shortened plugs for the compact phase so we can
    // copy over the whole plug and its related info (mark bits/cards). But we will
    // need to set the artificial gap back so the compact phase can keep reading the plug info.
    // We also need to keep the saved info around because we'll need to recover it later.
    //
    // So we would call swap_p*_plug_and_saved once to recover the object info, then call
    // it again to restore the artificial gap.
    void swap_pre_plug_and_saved()
    {
        gap_reloc_pair temp;
        memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
        memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
        saved_pre_plug_reloc = temp;
    }
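
    // Illustrative usage pattern (hypothetical caller), per the comment above:
    //   m->swap_pre_plug_and_saved ();  // 1st call: restore the original object info
    //   ... read / copy the full plug ...
    //   m->swap_pre_plug_and_saved ();  // 2nd call: put the artificial gap back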

    void swap_post_plug_and_saved()
    {
        gap_reloc_pair temp;
        memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
        memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
        saved_post_plug_reloc = temp;
    }

    void swap_pre_plug_and_saved_for_profiler()
    {
        gap_reloc_pair temp;
        memcpy (&temp, (first - sizeof (plug_and_gap)), sizeof (temp));
        memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
        saved_pre_plug = temp;
    }

    void swap_post_plug_and_saved_for_profiler()
    {
        gap_reloc_pair temp;
        memcpy (&temp, saved_post_plug_info_start, sizeof (temp));
        memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
        saved_post_plug = temp;
    }

    // We should think about whether it's really necessary to copy back the pre plug
    // info since it was already copied during compacting plugs. But if a plug doesn't move
    // by >= 3 ptr size (the size of gap_reloc_pair), we'd still have to recover the pre plug info.
    void recover_plug_info()
    {
        if (saved_pre_p)
        {
            if (gc_heap::settings.compaction)
            {
                dprintf (3, ("%Ix: REC Pre: %Ix-%Ix",
                    first,
                    &saved_pre_plug_reloc,
                    saved_pre_plug_info_reloc_start));
                memcpy (saved_pre_plug_info_reloc_start, &saved_pre_plug_reloc, sizeof (saved_pre_plug_reloc));
            }
            else
            {
                dprintf (3, ("%Ix: REC Pre: %Ix-%Ix",
                    first,
                    &saved_pre_plug,
                    (first - sizeof (plug_and_gap))));
                memcpy ((first - sizeof (plug_and_gap)), &saved_pre_plug, sizeof (saved_pre_plug));
            }
        }

        if (saved_post_p)
        {
            if (gc_heap::settings.compaction)
            {
                dprintf (3, ("%Ix: REC Post: %Ix-%Ix",
                    first,
                    &saved_post_plug_reloc,
                    saved_post_plug_info_start));
                memcpy (saved_post_plug_info_start, &saved_post_plug_reloc, sizeof (saved_post_plug_reloc));
            }
            else
            {
                dprintf (3, ("%Ix: REC Post: %Ix-%Ix",
                    first,
                    &saved_post_plug,
                    saved_post_plug_info_start));
                memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug));
            }
        }
    }
};


void gc_mechanisms::init_mechanisms()
{
    condemned_generation = 0;
    promotion = FALSE;//TRUE;
    compaction = TRUE;
#ifdef FEATURE_LOH_COMPACTION
    loh_compaction = gc_heap::loh_compaction_requested();
#else
    loh_compaction = FALSE;
#endif //FEATURE_LOH_COMPACTION
    heap_expansion = FALSE;
    concurrent = FALSE;
    demotion = FALSE;
    elevation_reduced = FALSE;
    found_finalizers = FALSE;
#ifdef BACKGROUND_GC
    background_p = recursive_gc_sync::background_running_p() != FALSE;
    allocations_allowed = TRUE;
#endif //BACKGROUND_GC

    entry_memory_load = 0;
    entry_available_physical_mem = 0;
    exit_memory_load = 0;

#ifdef STRESS_HEAP
    stress_induced = FALSE;
#endif // STRESS_HEAP
}

void gc_mechanisms::first_init()
{
    gc_index = 0;
    gen0_reduction_count = 0;
    should_lock_elevation = FALSE;
    elevation_locked_count = 0;
    reason = reason_empty;
#ifdef BACKGROUND_GC
    pause_mode = gc_heap::gc_can_use_concurrent ? pause_interactive : pause_batch;
#ifdef _DEBUG
    int debug_pause_mode = static_cast<int>(GCConfig::GetLatencyMode());
    if (debug_pause_mode >= 0)
    {
        assert (debug_pause_mode <= pause_sustained_low_latency);
        pause_mode = (gc_pause_mode)debug_pause_mode;
    }
#endif //_DEBUG
#else //BACKGROUND_GC
    pause_mode = pause_batch;
#endif //BACKGROUND_GC

    init_mechanisms();
}

void gc_mechanisms::record (gc_history_global* history)
{
#ifdef MULTIPLE_HEAPS
    history->num_heaps = gc_heap::n_heaps;
#else
    history->num_heaps = 1;
#endif //MULTIPLE_HEAPS

    history->condemned_generation = condemned_generation;
    history->gen0_reduction_count = gen0_reduction_count;
    history->reason = reason;
    history->pause_mode = (int)pause_mode;
    history->mem_pressure = entry_memory_load;
    history->global_mechanisms_p = 0;

    // start setting the boolean values.
    if (concurrent)
        history->set_mechanism_p (global_concurrent);

    if (compaction)
        history->set_mechanism_p (global_compaction);

    if (promotion)
        history->set_mechanism_p (global_promotion);

    if (demotion)
        history->set_mechanism_p (global_demotion);

    if (card_bundles)
        history->set_mechanism_p (global_card_bundles);

    if (elevation_reduced)
        history->set_mechanism_p (global_elevation);
}

/**********************************
   called at the beginning of GC to fix the allocated size to
   what is really allocated, or to turn the free area into an unused object.
   It needs to be called after all of the other allocation contexts have been
   fixed since it relies on alloc_allocated.
 ********************************/

//for_gc_p indicates that the work is being done for GC,
//as opposed to concurrent heap verification
void gc_heap::fix_youngest_allocation_area (BOOL for_gc_p)
{
    UNREFERENCED_PARAMETER(for_gc_p);

    // The gen 0 alloc context is never used for allocation in the allocator path. It's
    // still used in the allocation path during GCs.
    assert (generation_allocation_pointer (youngest_generation) == nullptr);
    assert (generation_allocation_limit (youngest_generation) == nullptr);
    heap_segment_allocated (ephemeral_heap_segment) = alloc_allocated;
}

void gc_heap::fix_large_allocation_area (BOOL for_gc_p)
{
    UNREFERENCED_PARAMETER(for_gc_p);

#ifdef _DEBUG
    alloc_context* acontext =
#endif // _DEBUG
        generation_alloc_context (large_object_generation);
    assert (acontext->alloc_ptr == 0);
    assert (acontext->alloc_limit == 0);
#if 0
    dprintf (3, ("Large object alloc context: ptr: %Ix, limit %Ix",
                 (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));
    fix_allocation_context (acontext, FALSE, get_alignment_constant (FALSE));
    if (for_gc_p)
    {
        acontext->alloc_ptr = 0;
        acontext->alloc_limit = acontext->alloc_ptr;
    }
#endif //0
}

//for_gc_p indicates that the work is being done for GC,
//as opposed to concurrent heap verification
void gc_heap::fix_allocation_context (alloc_context* acontext, BOOL for_gc_p,
                                      int align_const)
{
    dprintf (3, ("Fixing allocation context %Ix: ptr: %Ix, limit: %Ix",
                 (size_t)acontext,
                 (size_t)acontext->alloc_ptr, (size_t)acontext->alloc_limit));

    if (((size_t)(alloc_allocated - acontext->alloc_limit) > Align (min_obj_size, align_const)) ||
        !for_gc_p)
    {
        uint8_t*  point = acontext->alloc_ptr;
        if (point != 0)
        {
            size_t  size = (acontext->alloc_limit - acontext->alloc_ptr);
            // the allocation area was from the free list
            // it was shortened by Align (min_obj_size) to make room for
            // at least the shortest unused object
            size += Align (min_obj_size, align_const);
            assert ((size >= Align (min_obj_size)));

            dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point,
                       (size_t)point + size ));
            make_unused_array (point, size);

            if (for_gc_p)
            {
                generation_free_obj_space (generation_of (0)) += size;
                alloc_contexts_used ++;
            }
        }
    }
    else if (for_gc_p)
    {
        alloc_allocated = acontext->alloc_ptr;
        assert (heap_segment_allocated (ephemeral_heap_segment) <=
                heap_segment_committed (ephemeral_heap_segment));
        alloc_contexts_used ++;
    }

    if (for_gc_p)
    {
        // We need to update the alloc_bytes to reflect the portion that we have not used
        acontext->alloc_bytes -= (acontext->alloc_limit - acontext->alloc_ptr);
        total_alloc_bytes_soh -= (acontext->alloc_limit - acontext->alloc_ptr);

        acontext->alloc_ptr = 0;
        acontext->alloc_limit = acontext->alloc_ptr;
    }
}

//used by the heap verification for concurrent gc.
//it nulls out the words set by fix_allocation_context for heap_verification
void repair_allocation (gc_alloc_context* acontext, void*)
{
    uint8_t*  point = acontext->alloc_ptr;

    if (point != 0)
    {
        dprintf (3, ("Clearing [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
                     (size_t)acontext->alloc_limit+Align(min_obj_size)));
        memclr (acontext->alloc_ptr - plug_skew,
                (acontext->alloc_limit - acontext->alloc_ptr)+Align (min_obj_size));
    }
}

void void_allocation (gc_alloc_context* acontext, void*)
{
    uint8_t*  point = acontext->alloc_ptr;

    if (point != 0)
    {
        dprintf (3, ("Void [%Ix, %Ix[", (size_t)acontext->alloc_ptr,
                     (size_t)acontext->alloc_limit+Align(min_obj_size)));
        acontext->alloc_ptr = 0;
        acontext->alloc_limit = acontext->alloc_ptr;
    }
}

void gc_heap::repair_allocation_contexts (BOOL repair_p)
{
    GCToEEInterface::GcEnumAllocContexts (repair_p ? repair_allocation : void_allocation, NULL);
}

struct fix_alloc_context_args
{
    BOOL for_gc_p;
    void* heap;
};

void fix_alloc_context (gc_alloc_context* acontext, void* param)
{
    fix_alloc_context_args* args = (fix_alloc_context_args*)param;
    g_theGCHeap->FixAllocContext(acontext, (void*)(size_t)(args->for_gc_p), args->heap);
}

void gc_heap::fix_allocation_contexts (BOOL for_gc_p)
{
    fix_alloc_context_args args;
    args.for_gc_p = for_gc_p;
    args.heap = __this;

    GCToEEInterface::GcEnumAllocContexts(fix_alloc_context, &args);
    fix_youngest_allocation_area(for_gc_p);
    fix_large_allocation_area(for_gc_p);
}

void gc_heap::fix_older_allocation_area (generation* older_gen)
{
    heap_segment* older_gen_seg = generation_allocation_segment (older_gen);
    if (generation_allocation_limit (older_gen) !=
        heap_segment_plan_allocated (older_gen_seg))
    {
        uint8_t*  point = generation_allocation_pointer (older_gen);

        size_t  size = (generation_allocation_limit (older_gen) -
                               generation_allocation_pointer (older_gen));
        if (size != 0)
        {
            assert ((size >= Align (min_obj_size)));
            dprintf(3,("Making unused area [%Ix, %Ix[", (size_t)point, (size_t)point+size));
            make_unused_array (point, size);
            if (size >= min_free_list)
            {
                generation_allocator (older_gen)->thread_item_front (point, size);
                add_gen_free (older_gen->gen_num, size);
                generation_free_list_space (older_gen) += size;
            }
            else
            {
                generation_free_obj_space (older_gen) += size;
            }
        }
    }
    else
    {
        assert (older_gen_seg != ephemeral_heap_segment);
        heap_segment_plan_allocated (older_gen_seg) =
            generation_allocation_pointer (older_gen);
        generation_allocation_limit (older_gen) =
            generation_allocation_pointer (older_gen);
    }

    generation_allocation_pointer (older_gen) = 0;
    generation_allocation_limit (older_gen) = 0;
}

void gc_heap::set_allocation_heap_segment (generation* gen)
{
    uint8_t* p = generation_allocation_start (gen);
    assert (p);
    heap_segment* seg = generation_allocation_segment (gen);
    if (in_range_for_segment (p, seg))
        return;

    // try ephemeral heap segment in case of heap expansion
    seg = ephemeral_heap_segment;
    if (!in_range_for_segment (p, seg))
    {
        seg = heap_segment_rw (generation_start_segment (gen));

        PREFIX_ASSUME(seg != NULL);

        while (!in_range_for_segment (p, seg))
        {
            seg = heap_segment_next_rw (seg);
            PREFIX_ASSUME(seg != NULL);
        }
    }

    generation_allocation_segment (gen) = seg;
}

void gc_heap::reset_allocation_pointers (generation* gen, uint8_t* start)
{
    assert (start);
    assert (Align ((size_t)start) == (size_t)start);
    generation_allocation_start (gen) = start;
    generation_allocation_pointer (gen) =  0;//start + Align (min_obj_size);
    generation_allocation_limit (gen) = 0;//generation_allocation_pointer (gen);
    set_allocation_heap_segment (gen);
}

#ifdef BACKGROUND_GC
//TODO BACKGROUND_GC this is for test only
void
gc_heap::disallow_new_allocation (int gen_number)
{
    UNREFERENCED_PARAMETER(gen_number);
    settings.allocations_allowed = FALSE;
}
void
gc_heap::allow_new_allocation (int gen_number)
{
    UNREFERENCED_PARAMETER(gen_number);
    settings.allocations_allowed = TRUE;
}

#endif //BACKGROUND_GC

bool gc_heap::new_allocation_allowed (int gen_number)
{
#ifdef BACKGROUND_GC
    //TODO BACKGROUND_GC this is for test only
    if (!settings.allocations_allowed)
    {
        dprintf (2, ("new allocation not allowed"));
        return FALSE;
    }
#endif //BACKGROUND_GC

    if (dd_new_allocation (dynamic_data_of (gen_number)) < 0)
    {
        if (gen_number != 0)
        {
            // For LOH we will give it more budget before we try a GC.
            if (settings.concurrent)
            {
                dynamic_data* dd2 = dynamic_data_of (max_generation + 1 );

                if (dd_new_allocation (dd2) <= (ptrdiff_t)(-2 * dd_desired_allocation (dd2)))
                {
                    return TRUE;
                }
            }
        }
        return FALSE;
    }
#ifndef MULTIPLE_HEAPS
    else if ((settings.pause_mode != pause_no_gc) && (gen_number == 0))
    {
        dprintf (3, ("evaluating allocation rate"));
        dynamic_data* dd0 = dynamic_data_of (0);
        if ((allocation_running_amount - dd_new_allocation (dd0)) >
            dd_min_size (dd0))
        {
            uint32_t ctime = GCToOSInterface::GetLowPrecisionTimeStamp();
            if ((ctime - allocation_running_time) > 1000)
            {
                dprintf (2, (">1s since last gen0 gc"));
                return FALSE;
            }
            else
            {
                allocation_running_amount = dd_new_allocation (dd0);
            }
        }
    }
#endif //MULTIPLE_HEAPS
    return TRUE;
}

inline
ptrdiff_t gc_heap::get_desired_allocation (int gen_number)
{
    return dd_desired_allocation (dynamic_data_of (gen_number));
}

inline
ptrdiff_t  gc_heap::get_new_allocation (int gen_number)
{
    return dd_new_allocation (dynamic_data_of (gen_number));
}

//return the amount allocated so far in gen_number
inline
ptrdiff_t  gc_heap::get_allocation (int gen_number)
{
    dynamic_data* dd = dynamic_data_of (gen_number);

    return dd_desired_allocation (dd) - dd_new_allocation (dd);
}

inline
BOOL grow_mark_stack (mark*& m, size_t& len, size_t init_len)
{
    size_t new_size = max (init_len, 2*len);
    mark* tmp = new (nothrow) mark [new_size];
    if (tmp)
    {
        memcpy (tmp, m, len * sizeof (mark));
        delete[] m;
        m = tmp;
        len = new_size;
        return TRUE;
    }
    else
    {
        dprintf (1, ("Failed to allocate %Id bytes for mark stack", (len * sizeof (mark))));
        return FALSE;
    }
}

inline
uint8_t* pinned_plug (mark* m)
{
   return m->first;
}

inline
size_t& pinned_len (mark* m)
{
    return m->len;
}

inline
void set_new_pin_info (mark* m, uint8_t* pin_free_space_start)
{
    m->len = pinned_plug (m) - pin_free_space_start;
#ifdef SHORT_PLUGS
    m->allocation_context_start_region = pin_free_space_start;
#endif //SHORT_PLUGS
}

#ifdef SHORT_PLUGS
inline
uint8_t*& pin_allocation_context_start_region (mark* m)
{
    return m->allocation_context_start_region;
}

uint8_t* get_plug_start_in_saved (uint8_t* old_loc, mark* pinned_plug_entry)
{
    uint8_t* saved_pre_plug_info = (uint8_t*)(pinned_plug_entry->get_pre_plug_reloc_info());
    uint8_t* plug_start_in_saved = saved_pre_plug_info + (old_loc - (pinned_plug (pinned_plug_entry) - sizeof (plug_and_gap)));
    //dprintf (1, ("detected a very short plug: %Ix before PP %Ix, pad %Ix",
    //    old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
    dprintf (1, ("EP: %Ix(%Ix), %Ix", old_loc, pinned_plug (pinned_plug_entry), plug_start_in_saved));
    return plug_start_in_saved;
}

inline
void set_padding_in_expand (uint8_t* old_loc,
                            BOOL set_padding_on_saved_p,
                            mark* pinned_plug_entry)
{
    if (set_padding_on_saved_p)
    {
        set_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
    }
    else
    {
        set_plug_padded (old_loc);
    }
}

inline
void clear_padding_in_expand (uint8_t* old_loc,
                              BOOL set_padding_on_saved_p,
                              mark* pinned_plug_entry)
{
    if (set_padding_on_saved_p)
    {
        clear_plug_padded (get_plug_start_in_saved (old_loc, pinned_plug_entry));
    }
    else
    {
        clear_plug_padded (old_loc);
    }
}
#endif //SHORT_PLUGS

void gc_heap::reset_pinned_queue()
{
    mark_stack_tos = 0;
    mark_stack_bos = 0;
}

void gc_heap::reset_pinned_queue_bos()
{
    mark_stack_bos = 0;
}

// last_pinned_plug is only for assertion purposes.
void gc_heap::merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size)
{
    if (last_pinned_plug)
    {
        mark& last_m = mark_stack_array[mark_stack_tos - 1];
        assert (last_pinned_plug == last_m.first);
        if (last_m.saved_post_p)
        {
            last_m.saved_post_p = FALSE;
            dprintf (3, ("setting last plug %Ix post to false", last_m.first));
            // We need to recover what the gap has overwritten.
            memcpy ((last_m.first + last_m.len - sizeof (plug_and_gap)), &(last_m.saved_post_plug), sizeof (gap_reloc_pair));
        }
        last_m.len += plug_size;
        dprintf (3, ("recovered the last part of plug %Ix, setting its plug size to %Ix", last_m.first, last_m.len));
    }
}

void gc_heap::set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit)
{
    dprintf (3, ("sanp: ptr: %Ix, limit: %Ix", alloc_pointer, alloc_limit));
    dprintf (3, ("oldest %Id: %Ix", mark_stack_bos, pinned_plug (oldest_pin())));
    if (!(pinned_plug_que_empty_p()))
    {
        mark*  oldest_entry = oldest_pin();
        uint8_t* plug = pinned_plug (oldest_entry);
        if ((plug >= alloc_pointer) && (plug < alloc_limit))
        {
            alloc_limit = pinned_plug (oldest_entry);
            dprintf (3, ("now setting alloc context: %Ix->%Ix(%Id)",
                alloc_pointer, alloc_limit, (alloc_limit - alloc_pointer)));
        }
    }
}

void gc_heap::set_allocator_next_pin (generation* gen)
{
    dprintf (3, ("SANP: gen%d, ptr; %Ix, limit: %Ix", gen->gen_num, generation_allocation_pointer (gen), generation_allocation_limit (gen)));
    if (!(pinned_plug_que_empty_p()))
    {
        mark*  oldest_entry = oldest_pin();
        uint8_t* plug = pinned_plug (oldest_entry);
        if ((plug >= generation_allocation_pointer (gen)) &&
            (plug <  generation_allocation_limit (gen)))
        {
            generation_allocation_limit (gen) = pinned_plug (oldest_entry);
            dprintf (3, ("SANP: get next pin free space in gen%d for alloc: %Ix->%Ix(%Id)",
                gen->gen_num,
                generation_allocation_pointer (gen), generation_allocation_limit (gen),
                (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
        }
        else
            assert (!((plug < generation_allocation_pointer (gen)) &&
                      (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
    }
}

// After we set the info, we increase tos.
void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, uint8_t* alloc_pointer, uint8_t*& alloc_limit)
{
    UNREFERENCED_PARAMETER(last_pinned_plug);

    mark& m = mark_stack_array[mark_stack_tos];
    assert (m.first == last_pinned_plug);

    m.len = plug_len;
    mark_stack_tos++;
    set_allocator_next_pin (alloc_pointer, alloc_limit);
}

// After we set the info, we increase tos.
void gc_heap::set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen)
{
    UNREFERENCED_PARAMETER(last_pinned_plug);

    mark& m = mark_stack_array[mark_stack_tos];
    assert (m.first == last_pinned_plug);

    m.len = plug_len;
    mark_stack_tos++;
    assert (gen != 0);
    // Why are we checking here? gen is never 0.
    if (gen != 0)
    {
        set_allocator_next_pin (gen);
    }
}

size_t gc_heap::deque_pinned_plug ()
{
    dprintf (3, ("deque: %Id", mark_stack_bos));
    size_t m = mark_stack_bos;
    mark_stack_bos++;
    return m;
}

inline
mark* gc_heap::pinned_plug_of (size_t bos)
{
    return &mark_stack_array [ bos ];
}

inline
mark* gc_heap::oldest_pin ()
{
    return pinned_plug_of (mark_stack_bos);
}

inline
BOOL gc_heap::pinned_plug_que_empty_p ()
{
    return (mark_stack_bos == mark_stack_tos);
}

inline
mark* gc_heap::before_oldest_pin()
{
    if (mark_stack_bos >= 1)
        return pinned_plug_of (mark_stack_bos-1);
    else
        return 0;
}

inline
BOOL gc_heap::ephemeral_pointer_p (uint8_t* o)
{
    return ((o >= ephemeral_low) && (o < ephemeral_high));
}

#ifdef MH_SC_MARK
inline
int& gc_heap::mark_stack_busy()
{
    return  g_mark_stack_busy [(heap_number+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
}
#endif //MH_SC_MARK

void gc_heap::make_mark_stack (mark* arr)
{
    reset_pinned_queue();
    mark_stack_array = arr;
    mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
#ifdef MH_SC_MARK
    mark_stack_busy() = 0;
#endif //MH_SC_MARK
}

#ifdef BACKGROUND_GC
inline
size_t& gc_heap::bpromoted_bytes(int thread)
{
#ifdef MULTIPLE_HEAPS
    return g_bpromoted [thread*16];
#else //MULTIPLE_HEAPS
    UNREFERENCED_PARAMETER(thread);
    return g_bpromoted;
#endif //MULTIPLE_HEAPS
}

void gc_heap::make_background_mark_stack (uint8_t** arr)
{
    background_mark_stack_array = arr;
    background_mark_stack_array_length = MARK_STACK_INITIAL_LENGTH;
    background_mark_stack_tos = arr;
}

void gc_heap::make_c_mark_list (uint8_t** arr)
{
    c_mark_list = arr;
    c_mark_list_index = 0;
    c_mark_list_length = 1 + (OS_PAGE_SIZE / MIN_OBJECT_SIZE);
}
#endif //BACKGROUND_GC


#ifdef CARD_BUNDLE

// The card bundle keeps track of groups of card words.
static const size_t card_bundle_word_width = 32;

// Number of card table words (uint32_t's) covered by a single card bundle bit.
static const size_t card_bundle_size = (size_t)(GC_PAGE_SIZE / (sizeof(uint32_t)*card_bundle_word_width));
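
// For example, assuming GC_PAGE_SIZE is 4096 bytes, card_bundle_size works out
// to 4096 / (4 * 32) == 32 card words per card bundle bit, i.e. one 32-bit
// card bundle word covers one GC_PAGE_SIZE page of the card table.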

inline
size_t card_bundle_word (size_t cardb)
{
    return cardb / card_bundle_word_width;
}

inline
uint32_t card_bundle_bit (size_t cardb)
{
    return (uint32_t)(cardb % card_bundle_word_width);
}

size_t align_cardw_on_bundle (size_t cardw)
{
    return ((size_t)(cardw + card_bundle_size - 1) & ~(card_bundle_size - 1 ));
}

// Get the card bundle representing a card word
size_t cardw_card_bundle (size_t cardw)
{
    return cardw / card_bundle_size;
}

// Get the first card word in a card bundle
size_t card_bundle_cardw (size_t cardb)
{
    return cardb * card_bundle_size;
}

// Clear the specified card bundle
void gc_heap::card_bundle_clear (size_t cardb)
{
#ifdef FEATURE_CARD_MARKING_STEALING
    // with card marking stealing, we may have multiple threads trying to clear bits in the same card bundle word
    Interlocked::And((volatile uint32_t*)& card_bundle_table[card_bundle_word(cardb)], (uint32_t)~(1 << card_bundle_bit(cardb)));
#else
    card_bundle_table[card_bundle_word(cardb)] &= ~(1 << card_bundle_bit(cardb));
#endif
    dprintf (2, ("Cleared card bundle %Ix [%Ix, %Ix[", cardb, (size_t)card_bundle_cardw (cardb),
              (size_t)card_bundle_cardw (cardb+1)));
}

void gc_heap::card_bundle_set (size_t cardb)
{
    if (!card_bundle_set_p (cardb))
    {
        card_bundle_table [card_bundle_word (cardb)] |= (1 << card_bundle_bit (cardb));
    }
}

// Set the card bundle bits between start_cardb and end_cardb
void gc_heap::card_bundles_set (size_t start_cardb, size_t end_cardb)
{
    if (start_cardb == end_cardb)
    {
        card_bundle_set(start_cardb);
        return;
    }

    size_t start_word = card_bundle_word (start_cardb);
    size_t end_word = card_bundle_word (end_cardb);

    if (start_word < end_word)
    {
        // Set the partial words
        card_bundle_table [start_word] |= highbits (~0u, card_bundle_bit (start_cardb));

        if (card_bundle_bit (end_cardb))
            card_bundle_table [end_word] |= lowbits (~0u, card_bundle_bit (end_cardb));

        // Set the full words
        for (size_t i = start_word + 1; i < end_word; i++)
            card_bundle_table [i] = ~0u;
    }
    else
    {
        card_bundle_table [start_word] |= (highbits (~0u, card_bundle_bit (start_cardb)) &
                                            lowbits (~0u, card_bundle_bit (end_cardb)));
    }
}

// Indicates whether the specified bundle is set.
BOOL gc_heap::card_bundle_set_p (size_t cardb)
{
    return (card_bundle_table[card_bundle_word(cardb)] & (1 << card_bundle_bit (cardb)));
}

// Returns the size (in bytes) of a card bundle representing the region from 'from' to 'end'
size_t size_card_bundle_of (uint8_t* from, uint8_t* end)
{
    // Number of heap bytes represented by a card bundle word
    size_t cbw_span = card_size * card_word_width * card_bundle_size * card_bundle_word_width;

    // Align the start of the region down
    from = (uint8_t*)((size_t)from & ~(cbw_span - 1));

    // Align the end of the region up
    end = (uint8_t*)((size_t)(end + (cbw_span - 1)) & ~(cbw_span - 1));

    // Make sure they're really aligned
    assert (((size_t)from & (cbw_span - 1)) == 0);
    assert (((size_t)end  & (cbw_span - 1)) == 0);

    return ((end - from) / cbw_span) * sizeof (uint32_t);
}

// Takes a pointer to a card bundle table and an address, and returns a pointer that represents
// where a theoretical card bundle table that represents every address (starting from 0) would
// start if the bundle word representing the address were to be located at the pointer passed in.
// The returned 'translated' pointer makes it convenient/fast to calculate where the card bundle
// for a given address is using a simple shift operation on the address.
uint32_t* translate_card_bundle_table (uint32_t* cb, uint8_t* lowest_address)
{
    // The number of bytes of heap memory represented by a card bundle word
    const size_t heap_bytes_for_bundle_word = card_size * card_word_width * card_bundle_size * card_bundle_word_width;

    // Each card bundle word is 32 bits
    return (uint32_t*)((uint8_t*)cb - (((size_t)lowest_address / heap_bytes_for_bundle_word) * sizeof (uint32_t)));
}
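
// For illustration: once translated, the table can be indexed directly with an
// absolute card bundle number, e.g. card_bundle_table [card_bundle_word (cardb)]
// as card_bundle_clear and card_bundle_set do above, with no explicit
// subtraction of lowest_address.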

void gc_heap::enable_card_bundles ()
{
    if (can_use_write_watch_for_card_table() && (!card_bundles_enabled()))
    {
        dprintf (1, ("Enabling card bundles"));

        // We initially set all of the card bundles
        card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
                          cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
        settings.card_bundles = TRUE;
    }
}

BOOL gc_heap::card_bundles_enabled ()
{
    return settings.card_bundles;
}

#endif // CARD_BUNDLE

#if defined (_TARGET_AMD64_)
#define brick_size ((size_t)4096)
#else
#define brick_size ((size_t)2048)
#endif //_TARGET_AMD64_

inline
size_t gc_heap::brick_of (uint8_t* add)
{
    return (size_t)(add - lowest_address) / brick_size;
}

inline
uint8_t* gc_heap::brick_address (size_t brick)
{
    return lowest_address + (brick_size * brick);
}


void gc_heap::clear_brick_table (uint8_t* from, uint8_t* end)
{
    for (size_t i = brick_of (from);i < brick_of (end); i++)
        brick_table[i] = 0;
}

//codes for the brick entries:
//entry == 0 -> not assigned
//entry > 0  -> offset is entry-1
//entry < 0  -> jump back entry bricks
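//
// For example (illustrative values): set_brick (b, 0x40) stores 0x41 in the
// table (offset 0x40 from the brick's base address), while set_brick (b, -3)
// stores -3, meaning "walk back 3 bricks to find the entry with the real offset".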


inline
void gc_heap::set_brick (size_t index, ptrdiff_t val)
{
    if (val < -32767)
    {
        val = -32767;
    }
    assert (val < 32767);
    if (val >= 0)
        brick_table [index] = (short)val+1;
    else
        brick_table [index] = (short)val;
}

inline
int gc_heap::get_brick_entry (size_t index)
{
#ifdef MULTIPLE_HEAPS
    return VolatileLoadWithoutBarrier(&brick_table [index]);
#else
    return brick_table[index];
#endif
}


inline
uint8_t* align_on_brick (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + brick_size - 1) & ~(brick_size - 1));
}

inline
uint8_t* align_lower_brick (uint8_t* add)
{
    return (uint8_t*)(((size_t)add) & ~(brick_size - 1));
}

size_t size_brick_of (uint8_t* from, uint8_t* end)
{
    assert (((size_t)from & (brick_size-1)) == 0);
    assert (((size_t)end  & (brick_size-1)) == 0);

    return ((end - from) / brick_size) * sizeof (short);
}

inline
uint8_t* gc_heap::card_address (size_t card)
{
    return  (uint8_t*) (card_size * card);
}

inline
size_t gc_heap::card_of ( uint8_t* object)
{
    return (size_t)(object) / card_size;
}

inline
size_t gc_heap::card_to_brick (size_t card)
{
    return brick_of (card_address (card));
}

inline
uint8_t* align_on_card (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + card_size - 1) & ~(card_size - 1 ));
}
inline
uint8_t* align_on_card_word (uint8_t* add)
{
    return (uint8_t*) ((size_t)(add + (card_size*card_word_width)-1) & ~(card_size*card_word_width - 1));
}

inline
uint8_t* align_lower_card (uint8_t* add)
{
    return (uint8_t*)((size_t)add & ~(card_size-1));
}

inline
void gc_heap::clear_card (size_t card)
{
    card_table [card_word (card)] =
        (card_table [card_word (card)] & ~(1 << card_bit (card)));
    dprintf (3,("Cleared card %Ix [%Ix, %Ix[", card, (size_t)card_address (card),
              (size_t)card_address (card+1)));
}

inline
void gc_heap::set_card (size_t card)
{
    size_t word = card_word (card);
    card_table[word] = (card_table [word] | (1 << card_bit (card)));

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    // Also set the card bundle that corresponds to the card
    size_t bundle_to_set = cardw_card_bundle(word);

    card_bundle_set(bundle_to_set);

    dprintf (3,("Set card %Ix [%Ix, %Ix[ and bundle %Ix", card, (size_t)card_address (card), (size_t)card_address (card+1), bundle_to_set));
    assert(card_bundle_set_p(bundle_to_set) != 0);
#endif
}

inline
BOOL  gc_heap::card_set_p (size_t card)
{
    return ( card_table [ card_word (card) ] & (1 << card_bit (card)));
}

// Returns the number of DWORDs in the card table that cover the
// range of addresses [from, end[.
size_t count_card_of (uint8_t* from, uint8_t* end)
{
    return card_word (gcard_of (end - 1)) - card_word (gcard_of (from)) + 1;
}

// Returns the number of bytes to allocate for a card table
// that covers the range of addresses [from, end[.
size_t size_card_of (uint8_t* from, uint8_t* end)
{
    return count_card_of (from, end) * sizeof(uint32_t);
}

// We don't store seg_mapping_table in card_table_info because there's always only one view.
class card_table_info
{
public:
    unsigned    recount;
    uint8_t*    lowest_address;
    uint8_t*    highest_address;
    short*      brick_table;

#ifdef CARD_BUNDLE
    uint32_t*   card_bundle_table;
#endif //CARD_BUNDLE

    // mark_array is always at the end of the data structure because we
    // want to be able to make one commit call for everything before it.
#ifdef MARK_ARRAY
    uint32_t*   mark_array;
#endif //MARK_ARRAY

    size_t      size;
    uint32_t*   next_card_table;
};

//These are accessors on the untranslated card table
inline
unsigned& card_table_refcount (uint32_t* c_table)
{
    return *(unsigned*)((char*)c_table - sizeof (card_table_info));
}

inline
uint8_t*& card_table_lowest_address (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->lowest_address;
}

uint32_t* translate_card_table (uint32_t* ct)
{
    return (uint32_t*)((uint8_t*)ct - card_word (gcard_of (card_table_lowest_address (ct))) * sizeof(uint32_t));
}

inline
uint8_t*& card_table_highest_address (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->highest_address;
}

inline
short*& card_table_brick_table (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->brick_table;
}

#ifdef CARD_BUNDLE
inline
uint32_t*& card_table_card_bundle_table (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->card_bundle_table;
}
#endif //CARD_BUNDLE

#ifdef MARK_ARRAY
/* Support for mark_array */

inline
uint32_t*& card_table_mark_array (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array;
}

#ifdef BIT64
#define mark_bit_pitch ((size_t)16)
#else
#define mark_bit_pitch ((size_t)8)
#endif // BIT64
#define mark_word_width ((size_t)32)
#define mark_word_size (mark_word_width * mark_bit_pitch)
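
// For example, on 64-bit (mark_bit_pitch == 16) one mark bit covers 16 bytes and
// one 32-bit mark word covers 32 * 16 == 512 bytes of heap; on 32-bit the
// corresponding spans are 8 and 256 bytes.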

inline
uint8_t* align_on_mark_bit (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + (mark_bit_pitch - 1)) & ~(mark_bit_pitch - 1));
}

inline
uint8_t* align_lower_mark_bit (uint8_t* add)
{
    return (uint8_t*)((size_t)(add) & ~(mark_bit_pitch - 1));
}

inline
BOOL is_aligned_on_mark_word (uint8_t* add)
{
    return ((size_t)add == ((size_t)(add) & ~(mark_word_size - 1)));
}

inline
uint8_t* align_on_mark_word (uint8_t* add)
{
    return (uint8_t*)((size_t)(add + mark_word_size - 1) & ~(mark_word_size - 1));
}

inline
uint8_t* align_lower_mark_word (uint8_t* add)
{
    return (uint8_t*)((size_t)(add) & ~(mark_word_size - 1));
}

inline
size_t mark_bit_of (uint8_t* add)
{
    return ((size_t)add / mark_bit_pitch);
}

inline
unsigned int mark_bit_bit (size_t mark_bit)
{
    return (unsigned int)(mark_bit % mark_word_width);
}

inline
size_t mark_bit_word (size_t mark_bit)
{
    return (mark_bit / mark_word_width);
}

inline
size_t mark_word_of (uint8_t* add)
{
    return ((size_t)add) / mark_word_size;
}

uint8_t* mark_word_address (size_t wd)
{
    return (uint8_t*)(wd*mark_word_size);
}

uint8_t* mark_bit_address (size_t mark_bit)
{
    return (uint8_t*)(mark_bit*mark_bit_pitch);
}

inline
size_t mark_bit_bit_of (uint8_t* add)
{
    return  (((size_t)add / mark_bit_pitch) % mark_word_width);
}
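
// Worked example of the mark bit geometry above, for the 64-bit values
// (mark_bit_pitch == 16, mark_word_width == 32, mark_word_size == 512);
// purely illustrative:
//
//     uint8_t* add = (uint8_t*)0x1234;
//     mark_word_of (add);    // 0x1234 / 512 == 9   - which uint32_t in the mark array
//     mark_bit_bit_of (add); // (0x1234 / 16) % 32 == 3 - which bit within that word
//
// So each uint32_t of mark array covers 512 bytes of heap, i.e. the mark
// array costs one bit per 16 bytes (1/128th of the covered range).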

inline
unsigned int gc_heap::mark_array_marked(uint8_t* add)
{
    return mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add));
}

inline
BOOL gc_heap::is_mark_bit_set (uint8_t* add)
{
    return (mark_array [mark_word_of (add)] & (1 << mark_bit_bit_of (add)));
}

inline
void gc_heap::mark_array_set_marked (uint8_t* add)
{
    size_t index = mark_word_of (add);
    uint32_t val = (1 << mark_bit_bit_of (add));
#ifdef MULTIPLE_HEAPS
    Interlocked::Or (&(mark_array [index]), val);
#else
    mark_array [index] |= val;
#endif
}

inline
void gc_heap::mark_array_clear_marked (uint8_t* add)
{
    mark_array [mark_word_of (add)] &= ~(1 << mark_bit_bit_of (add));
}

size_t size_mark_array_of (uint8_t* from, uint8_t* end)
{
    assert (((size_t)from & ((mark_word_size)-1)) == 0);
    assert (((size_t)end  & ((mark_word_size)-1)) == 0);
    return sizeof (uint32_t)*(((end - from) / mark_word_size));
}

//In order to eliminate lowest_address from the mark array computations
//(mark_word_of, etc.), mark_array is offset according to the lowest_address.
uint32_t* translate_mark_array (uint32_t* ma)
{
    return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address));
}
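
// Illustrative sketch: as with translate_card_table, the bias makes the
// translated mark array directly indexable by mark_word_of (addr) for any
// address at or above g_gc_lowest_address:
//
//     uint32_t* t_ma = translate_mark_array (ma);
//     // &t_ma[mark_word_of (addr)] ==
//     //     &ma[mark_word_of (addr) - mark_word_of (g_gc_lowest_address)]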

// from and end must be page aligned addresses.
void gc_heap::clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only/*=TRUE*/
#ifdef FEATURE_BASICFREEZE
                                , BOOL read_only/*=FALSE*/
#endif // FEATURE_BASICFREEZE
                                )
{
    if(!gc_can_use_concurrent)
        return;

#ifdef FEATURE_BASICFREEZE
    if (!read_only)
#endif // FEATURE_BASICFREEZE
    {
        assert (from == align_on_mark_word (from));
    }
    assert (end == align_on_mark_word (end));

#ifdef BACKGROUND_GC
    uint8_t* current_lowest_address = background_saved_lowest_address;
    uint8_t* current_highest_address = background_saved_highest_address;
#else
    uint8_t* current_lowest_address = lowest_address;
    uint8_t* current_highest_address = highest_address;
#endif //BACKGROUND_GC

    //the addresses may be outside of the covered range
    //because of a newly allocated large object segment
    if ((end <= current_highest_address) && (from >= current_lowest_address))
    {
        size_t beg_word = mark_word_of (align_on_mark_word (from));
        MAYBE_UNUSED_VAR(beg_word);
        //align end word to make sure to cover the address
        size_t end_word = mark_word_of (align_on_mark_word (end));
        MAYBE_UNUSED_VAR(end_word);
        dprintf (3, ("Calling clearing mark array [%Ix, %Ix[ for addresses [%Ix, %Ix[(%s)",
                     (size_t)mark_word_address (beg_word),
                     (size_t)mark_word_address (end_word),
                     (size_t)from, (size_t)end,
                     (check_only ? "check_only" : "clear")));
        if (!check_only)
        {
            uint8_t* op = from;
            while (op < mark_word_address (beg_word))
            {
                mark_array_clear_marked (op);
                op += mark_bit_pitch;
            }

            memset (&mark_array[beg_word], 0, (end_word - beg_word)*sizeof (uint32_t));
        }
#ifdef _DEBUG
        else
        {
            //Beware: it is assumed that the mark array word straddling
            //start has been cleared beforehand; here we only verify that
            //the array is empty.
            size_t  markw = mark_word_of (align_on_mark_word (from));
            size_t  markw_end = mark_word_of (align_on_mark_word (end));
            while (markw < markw_end)
            {
                assert (!(mark_array [markw]));
                markw++;
            }
            uint8_t* p = mark_word_address (markw_end);
            while (p < end)
            {
                assert (!(mark_array_marked (p)));
                p++;
            }
        }
#endif //_DEBUG
    }
}
#endif //MARK_ARRAY

//These work on untranslated card tables
inline
uint32_t*& card_table_next (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->next_card_table;
}

inline
size_t& card_table_size (uint32_t* c_table)
{
    return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->size;
}

void own_card_table (uint32_t* c_table)
{
    card_table_refcount (c_table) += 1;
}

void destroy_card_table (uint32_t* c_table);

void delete_next_card_table (uint32_t* c_table)
{
    uint32_t* n_table = card_table_next (c_table);
    if (n_table)
    {
        if (card_table_next (n_table))
        {
            delete_next_card_table (n_table);
        }
        if (card_table_refcount (n_table) == 0)
        {
            destroy_card_table (n_table);
            card_table_next (c_table) = 0;
        }
    }
}

void release_card_table (uint32_t* c_table)
{
    assert (card_table_refcount (c_table) >0);
    card_table_refcount (c_table) -= 1;
    if (card_table_refcount (c_table) == 0)
    {
        delete_next_card_table (c_table);
        if (card_table_next (c_table) == 0)
        {
            destroy_card_table (c_table);
            // sever the link from the parent
            if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table)
            {
                g_gc_card_table = 0;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
                g_gc_card_bundle_table = 0;
#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
                SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
            }
            else
            {
                uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))];
                if (p_table)
                {
                    while (p_table && (card_table_next (p_table) != c_table))
                        p_table = card_table_next (p_table);
                    card_table_next (p_table) = 0;
                }
            }
        }
    }
}

void destroy_card_table (uint32_t* c_table)
{
//  delete (uint32_t*)&card_table_refcount(c_table);

    GCToOSInterface::VirtualRelease (&card_table_refcount(c_table), card_table_size(c_table));
    dprintf (2, ("Table Virtual Free : %Ix", (size_t)&card_table_refcount(c_table)));
}
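
// Usage sketch (illustrative): old card tables are kept alive on the
// card_table_next chain until every user has let go of them, e.g. the pattern
// in copy_brick_card_table below:
//
//     uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
//     own_card_table (ct);             // take a reference on the current table
//     ...                              // copy bricks / OR cards from the old table
//     release_card_table (old_ct);     // drop the old reference; once the count
//                                      // reaches zero the table is destroyed and
//                                      // unlinked from the chain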

uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end)
{
    assert (g_gc_lowest_address == start);
    assert (g_gc_highest_address == end);

    uint32_t virtual_reserve_flags = VirtualReserveFlags::None;

    size_t bs = size_brick_of (start, end);
    size_t cs = size_card_of (start, end);
#ifdef MARK_ARRAY
    size_t ms = (gc_can_use_concurrent ?
                 size_mark_array_of (start, end) :
                 0);
#else
    size_t ms = 0;
#endif //MARK_ARRAY

    size_t cb = 0;

#ifdef CARD_BUNDLE
    if (can_use_write_watch_for_card_table())
    {
        cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address);
#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        // If we're not manually managing the card bundles, we will need to use OS write
        // watch APIs over this region to track changes.
        virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
#endif
    }
#endif //CARD_BUNDLE

    size_t wws = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    size_t sw_ww_table_offset = 0;
    if (gc_can_use_concurrent)
    {
        size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
        sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
        wws = sw_ww_table_offset - sw_ww_size_before_table + SoftwareWriteWatch::GetTableByteSize(start, end);
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#ifdef GROWABLE_SEG_MAPPING_TABLE
    size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
    size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
    size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);

    st += (st_table_offset_aligned - st_table_offset);
#else //GROWABLE_SEG_MAPPING_TABLE
    size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE

    // it is impossible for alloc_size to overflow due to the bounds on each of
    // its components.
    size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
    uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);

    if (!mem)
        return 0;

    dprintf (2, ("Init - Card table alloc for %Id bytes: [%Ix, %Ix[",
                 alloc_size, (size_t)mem, (size_t)(mem+alloc_size)));

    // mark array will be committed separately (per segment).
    size_t commit_size = alloc_size - ms;

    if (!virtual_commit (mem, commit_size))
    {
        dprintf (1, ("Card table commit failed"));
        GCToOSInterface::VirtualRelease (mem, alloc_size);
        return 0;
    }

    // initialize the ref count
    uint32_t* ct = (uint32_t*)(mem+sizeof (card_table_info));
    card_table_refcount (ct) = 0;
    card_table_lowest_address (ct) = start;
    card_table_highest_address (ct) = end;
    card_table_brick_table (ct) = (short*)((uint8_t*)ct + cs);
    card_table_size (ct) = alloc_size;
    card_table_next (ct) = 0;

#ifdef CARD_BUNDLE
    card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), g_gc_lowest_address);
#endif

#endif //CARD_BUNDLE

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (gc_can_use_concurrent)
    {
        SoftwareWriteWatch::InitializeUntranslatedTable(mem + sw_ww_table_offset, start);
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#ifdef GROWABLE_SEG_MAPPING_TABLE
    seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
    seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table -
                                        size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address))));
#endif //GROWABLE_SEG_MAPPING_TABLE

#ifdef MARK_ARRAY
    if (gc_can_use_concurrent)
        card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
    else
        card_table_mark_array (ct) = NULL;
#endif //MARK_ARRAY

    return translate_card_table(ct);
}

void gc_heap::set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->fgm_result.set_fgm (f, s, loh_p);
    }
#else //MULTIPLE_HEAPS
    fgm_result.set_fgm (f, s, loh_p);
#endif //MULTIPLE_HEAPS
}

//returns 0 for success, -1 otherwise
// We are doing all the decommitting here because we want to make sure we have
// enough memory to do so - if we did this during copy_brick_card_table and
// failed to decommit, it would make the failure case very complicated to
// handle. This way we may waste some decommit work if we call this multiple
// times before the next FGC, but it's easier to handle the failure case.
int gc_heap::grow_brick_card_tables (uint8_t* start,
                                     uint8_t* end,
                                     size_t size,
                                     heap_segment* new_seg,
                                     gc_heap* hp,
                                     BOOL loh_p)
{
    uint8_t* la = g_gc_lowest_address;
    uint8_t* ha = g_gc_highest_address;
    uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address);
    uint8_t* saved_g_highest_address = max (end, g_gc_highest_address);
    seg_mapping* new_seg_mapping_table = nullptr;
#ifdef BACKGROUND_GC
    // This value is only for logging purposes - it's not necessarily exactly what we
    // would commit for the mark array, but it's close enough for diagnostic purposes.
    size_t logging_ma_commit_size = size_mark_array_of (0, (uint8_t*)size);
#endif //BACKGROUND_GC

    // See if the address is already covered
    if ((la != saved_g_lowest_address ) || (ha != saved_g_highest_address))
    {
        {
            //modify the highest address so the span covered
            //is twice the previous one.
            uint8_t* top = (uint8_t*)0 + Align (GCToOSInterface::GetVirtualMemoryLimit());
            // On non-Windows systems, we get only an approximate value that can possibly be
            // slightly lower than the saved_g_highest_address.
            // In such case, we set the top to the saved_g_highest_address so that the
            // card and brick tables always cover the whole new range.
            if (top < saved_g_highest_address)
            {
                top = saved_g_highest_address;
            }
            size_t ps = ha-la;
#ifdef BIT64
            if (ps > (uint64_t)200*1024*1024*1024)
                ps += (uint64_t)100*1024*1024*1024;
            else
#endif // BIT64
                ps *= 2;

            if (saved_g_lowest_address < g_gc_lowest_address)
            {
                if (ps > (size_t)g_gc_lowest_address)
                    saved_g_lowest_address = (uint8_t*)(size_t)OS_PAGE_SIZE;
                else
                {
                    assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE);
                    saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps));
                }
            }

            if (saved_g_highest_address > g_gc_highest_address)
            {
                saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address);
                if (saved_g_highest_address > top)
                    saved_g_highest_address = top;
            }
        }
        dprintf (GC_TABLE_LOG, ("Growing card table [%Ix, %Ix[",
                                (size_t)saved_g_lowest_address,
                                (size_t)saved_g_highest_address));

        bool write_barrier_updated = false;
        uint32_t virtual_reserve_flags = VirtualReserveFlags::None;
        uint32_t* saved_g_card_table = g_gc_card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        uint32_t* saved_g_card_bundle_table = g_gc_card_bundle_table;
#endif

        uint32_t* ct = 0;
        uint32_t* translated_ct = 0;
        short* bt = 0;

        size_t cs = size_card_of (saved_g_lowest_address, saved_g_highest_address);
        size_t bs = size_brick_of (saved_g_lowest_address, saved_g_highest_address);

#ifdef MARK_ARRAY
        size_t ms = (gc_heap::gc_can_use_concurrent ?
                    size_mark_array_of (saved_g_lowest_address, saved_g_highest_address) :
                    0);
#else
        size_t ms = 0;
#endif //MARK_ARRAY

        size_t cb = 0;

#ifdef CARD_BUNDLE
        if (can_use_write_watch_for_card_table())
        {
            cb = size_card_bundle_of (saved_g_lowest_address, saved_g_highest_address);

#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            // If we're not manually managing the card bundles, we will need to use OS write
            // watch APIs over this region to track changes.
            virtual_reserve_flags |= VirtualReserveFlags::WriteWatch;
#endif
        }
#endif //CARD_BUNDLE

        size_t wws = 0;
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        size_t sw_ww_table_offset = 0;
        if (gc_can_use_concurrent)
        {
            size_t sw_ww_size_before_table = sizeof(card_table_info) + cs + bs + cb;
            sw_ww_table_offset = SoftwareWriteWatch::GetTableStartByteOffset(sw_ww_size_before_table);
            wws =
                sw_ww_table_offset -
                sw_ww_size_before_table +
                SoftwareWriteWatch::GetTableByteSize(saved_g_lowest_address, saved_g_highest_address);
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#ifdef GROWABLE_SEG_MAPPING_TABLE
        size_t st = size_seg_mapping_table_of (saved_g_lowest_address, saved_g_highest_address);
        size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws;
        size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset);
        st += (st_table_offset_aligned - st_table_offset);
#else //GROWABLE_SEG_MAPPING_TABLE
        size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE

        // it is impossible for alloc_size to overflow due to the bounds on each of
        // its components.
        size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms);
        dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id",
                                  cs, bs, cb, wws, st, ms));

        uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size, 0, virtual_reserve_flags);

        if (!mem)
        {
            set_fgm_result (fgm_grow_table, alloc_size, loh_p);
            goto fail;
        }

        dprintf (GC_TABLE_LOG, ("Table alloc for %Id bytes: [%Ix, %Ix[",
                                 alloc_size, (size_t)mem, (size_t)((uint8_t*)mem+alloc_size)));

        {
            // mark array will be committed separately (per segment).
            size_t commit_size = alloc_size - ms;

            if (!virtual_commit (mem, commit_size))
            {
                dprintf (GC_TABLE_LOG, ("Table commit failed"));
                set_fgm_result (fgm_commit_table, commit_size, loh_p);
                goto fail;
            }
        }

        ct = (uint32_t*)(mem + sizeof (card_table_info));
        card_table_refcount (ct) = 0;
        card_table_lowest_address (ct) = saved_g_lowest_address;
        card_table_highest_address (ct) = saved_g_highest_address;
        card_table_next (ct) = &g_gc_card_table[card_word (gcard_of (la))];

        //clear the card table
/*
        memclr ((uint8_t*)ct,
                (((saved_g_highest_address - saved_g_lowest_address)*sizeof (uint32_t) /
                  (card_size * card_word_width))
                 + sizeof (uint32_t)));
*/

        bt = (short*)((uint8_t*)ct + cs);

        // No initialization needed, will be done in copy_brick_card

        card_table_brick_table (ct) = bt;

#ifdef CARD_BUNDLE
        card_table_card_bundle_table (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs);
        //set all bundles so we will look at all of the cards
        memset(card_table_card_bundle_table (ct), 0xFF, cb);
#endif //CARD_BUNDLE

#ifdef GROWABLE_SEG_MAPPING_TABLE
        {
            new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned);
            new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table -
                                              size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address))));
            memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
                &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)],
                size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address));

            // new_seg_mapping_table gets assigned to seg_mapping_table at the bottom of this function,
            // not here. The reason for this is that, if we fail at mark array committing (OOM) and we've
            // already switched seg_mapping_table to point to the new mapping table, we'll decommit it and
            // run into trouble. By not assigning here, we're making sure that we will not change seg_mapping_table
            // if an OOM occurs.
        }
#endif //GROWABLE_SEG_MAPPING_TABLE

#ifdef MARK_ARRAY
        if(gc_can_use_concurrent)
            card_table_mark_array (ct) = (uint32_t*)((uint8_t*)card_table_brick_table (ct) + bs + cb + wws + st);
        else
            card_table_mark_array (ct) = NULL;
#endif //MARK_ARRAY

        translated_ct = translate_card_table (ct);

        dprintf (GC_TABLE_LOG, ("card table: %Ix(translated: %Ix), seg map: %Ix, mark array: %Ix",
            (size_t)ct, (size_t)translated_ct, (size_t)new_seg_mapping_table, (size_t)card_table_mark_array (ct)));

#ifdef BACKGROUND_GC
        if (hp->should_commit_mark_array())
        {
            dprintf (GC_TABLE_LOG, ("new low: %Ix, new high: %Ix, latest mark array is %Ix(translate: %Ix)",
                                    saved_g_lowest_address, saved_g_highest_address,
                                    card_table_mark_array (ct),
                                    translate_mark_array (card_table_mark_array (ct))));
            uint32_t* new_mark_array = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, saved_g_lowest_address));
            if (!commit_new_mark_array_global (new_mark_array))
            {
                dprintf (GC_TABLE_LOG, ("failed to commit portions in the mark array for existing segments"));
                set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
                goto fail;
            }

            if (!commit_mark_array_new_seg (hp, new_seg, translated_ct, saved_g_lowest_address))
            {
                dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg"));
                set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
                goto fail;
            }
        }
        else
        {
            clear_commit_flag_global();
        }
#endif //BACKGROUND_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (gc_can_use_concurrent)
        {
            // The current design of software write watch requires that the runtime is suspended during resize. Suspending
            // on resize is preferred because it is a far less frequent operation than GetWriteWatch() / ResetWriteWatch().
            // Suspending here allows copying dirty state from the old table into the new table, and not have to merge old
            // table info lazily as done for card tables.

            // Either this thread was the thread that did the suspension which means we are suspended; or this is called
            // from a GC thread which means we are in a blocking GC and also suspended.
            bool is_runtime_suspended = GCToEEInterface::IsGCThread();
            if (!is_runtime_suspended)
            {
                // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the
                // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call.
                // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state
                // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and
                // g_gc_highest_address.
                suspend_EE();
            }

            g_gc_card_table = translated_ct;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
#endif

            SoftwareWriteWatch::SetResizedUntranslatedTable(
                mem + sw_ww_table_offset,
                saved_g_lowest_address,
                saved_g_highest_address);

            seg_mapping_table = new_seg_mapping_table;

            // Since the runtime is already suspended, update the write barrier here as well.
            // This passes a bool telling whether we need to switch to the post
            // grow version of the write barrier.  This test tells us if the new
            // segment was allocated at a lower address than the old, requiring
            // that we start doing an upper bounds check in the write barrier.
            g_gc_lowest_address = saved_g_lowest_address;
            g_gc_highest_address = saved_g_highest_address;
            stomp_write_barrier_resize(true, la != saved_g_lowest_address);
            write_barrier_updated = true;

            if (!is_runtime_suspended)
            {
                restart_EE();
            }
        }
        else
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        {
            g_gc_card_table = translated_ct;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            g_gc_card_bundle_table = translate_card_bundle_table(card_table_card_bundle_table(ct), saved_g_lowest_address);
#endif
        }

        if (!write_barrier_updated)
        {
            seg_mapping_table = new_seg_mapping_table;
            GCToOSInterface::FlushProcessWriteBuffers();
            g_gc_lowest_address = saved_g_lowest_address;
            g_gc_highest_address = saved_g_highest_address;

            // This passes a bool telling whether we need to switch to the post
            // grow version of the write barrier.  This test tells us if the new
            // segment was allocated at a lower address than the old, requiring
            // that we start doing an upper bounds check in the write barrier.
            // This will also suspend the runtime if the write barrier type needs
            // to be changed, so we are doing this after all global state has
            // been updated. See the comment above the suspend_EE() call for more
            // info.
            stomp_write_barrier_resize(GCToEEInterface::IsGCThread(), la != saved_g_lowest_address);
        }

        return 0;

fail:
        //clean up the mess and return -1

        if (mem)
        {
            assert(g_gc_card_table == saved_g_card_table);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            assert(g_gc_card_bundle_table  == saved_g_card_bundle_table);
#endif

            //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info));
            if (!GCToOSInterface::VirtualRelease (mem, alloc_size))
            {
                dprintf (GC_TABLE_LOG, ("GCToOSInterface::VirtualRelease failed"));
                assert (!"release failed");
            }
        }

        return -1;
    }
    else
    {
#ifdef BACKGROUND_GC
        if (hp->should_commit_mark_array())
        {
            dprintf (GC_TABLE_LOG, ("in range new seg %Ix, mark_array is %Ix", new_seg, hp->mark_array));
            if (!commit_mark_array_new_seg (hp, new_seg))
            {
                dprintf (GC_TABLE_LOG, ("failed to commit mark array for the new seg in range"));
                set_fgm_result (fgm_commit_table, logging_ma_commit_size, loh_p);
                return -1;
            }
        }
#endif //BACKGROUND_GC
    }

    return 0;
}

//copy all of the arrays managed by the card table for a page aligned range
void gc_heap::copy_brick_card_range (uint8_t* la, uint32_t* old_card_table,
                                     short* old_brick_table,
                                     heap_segment* seg,
                                     uint8_t* start, uint8_t* end)
{
    ptrdiff_t brick_offset = brick_of (start) - brick_of (la);


    dprintf (2, ("copying tables for range [%Ix %Ix[", (size_t)start, (size_t)end));

    // copy brick table
    short* brick_start = &brick_table [brick_of (start)];
    if (old_brick_table)
    {
        // segments are always on page boundaries
        memcpy (brick_start, &old_brick_table[brick_offset],
                size_brick_of (start, end));

    }
    else
    {
        // This is a large object segment; there is no old brick table to copy from
    }

    uint32_t* old_ct = &old_card_table[card_word (card_of (la))];
#ifdef MARK_ARRAY
#ifdef BACKGROUND_GC
    UNREFERENCED_PARAMETER(seg);
    if (recursive_gc_sync::background_running_p())
    {
        uint32_t* old_mark_array = card_table_mark_array (old_ct);

        // We don't need to go through all the card tables here because
        // we only need to copy from the GC version of the mark array - when we
        // mark (even in allocate_large_object) we always use that mark array.
        if ((card_table_highest_address (old_ct) >= start) &&
            (card_table_lowest_address (old_ct) <= end))
        {
            if ((background_saved_highest_address >= start) &&
                (background_saved_lowest_address <= end))
            {
                //copy the mark bits
                // segments are always on page boundaries
                uint8_t* m_start = max (background_saved_lowest_address, start);
                uint8_t* m_end = min (background_saved_highest_address, end);
                memcpy (&mark_array[mark_word_of (m_start)],
                        &old_mark_array[mark_word_of (m_start) - mark_word_of (la)],
                        size_mark_array_of (m_start, m_end));
            }
        }
        else
        {
            //only large segments can be out of range
            assert (old_brick_table == 0);
        }
    }
#else //BACKGROUND_GC
    assert (seg != 0);
    clear_mark_array (start, heap_segment_committed(seg));
#endif //BACKGROUND_GC
#endif //MARK_ARRAY

    // n-way merge with all of the card tables ever used in between
    uint32_t* ct = card_table_next (&card_table[card_word (card_of(lowest_address))]);

    assert (ct);
    while (card_table_next (old_ct) != ct)
    {
        //copy if old card table contained [start, end[
        if ((card_table_highest_address (ct) >= end) &&
            (card_table_lowest_address (ct) <= start))
        {
            // OR the card tables together

            size_t start_word = card_word (card_of (start));

            uint32_t* dest = &card_table[start_word];
            uint32_t* src = &((translate_card_table (ct))[start_word]);
            ptrdiff_t count = count_card_of (start, end);
            for (int x = 0; x < count; x++)
            {
                *dest |= *src;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
                if (*src != 0)
                {
                    card_bundle_set(cardw_card_bundle(start_word+x));
                }
#endif

                dest++;
                src++;
            }
        }
        ct = card_table_next (ct);
    }
}
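
// Illustrative note: the merge above ORs card words instead of copying them
// because a set card bit is a conservative "may contain cross-generation
// pointers" flag - taking the union across every intermediate table can only
// over-report dirty regions, never lose one. Per card word the effect is:
//
//     card_table[w] |= old_table_1[w] | old_table_2[w] | ... ;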

//initialize all of the arrays managed by the card table for a page aligned range when an existing ro segment becomes in range
void gc_heap::init_brick_card_range (heap_segment* seg)
{
    dprintf (2, ("initialising tables for range [%Ix %Ix[",
                 (size_t)heap_segment_mem (seg),
                 (size_t)heap_segment_allocated (seg)));

    // initialize the brick table
    for (size_t b = brick_of (heap_segment_mem (seg));
         b < brick_of (align_on_brick (heap_segment_allocated (seg)));
         b++)
    {
        set_brick (b, -1);
    }

#ifdef MARK_ARRAY
    if (recursive_gc_sync::background_running_p() && (seg->flags & heap_segment_flags_ma_committed))
    {
        assert (seg != 0);
        clear_mark_array (heap_segment_mem (seg), heap_segment_committed(seg));
    }
#endif //MARK_ARRAY

    clear_card_for_addresses (heap_segment_mem (seg),
                              heap_segment_allocated (seg));
}

void gc_heap::copy_brick_card_table()
{
    uint8_t* la = lowest_address;
    uint8_t* ha = highest_address;
    MAYBE_UNUSED_VAR(ha);
    uint32_t* old_card_table = card_table;
    short* old_brick_table = brick_table;

    assert (la == card_table_lowest_address (&old_card_table[card_word (card_of (la))]));
    assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))]));

    /* todo: Need a global lock for this */
    uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
    own_card_table (ct);
    card_table = translate_card_table (ct);
    /* End of global lock */
    highest_address = card_table_highest_address (ct);
    lowest_address = card_table_lowest_address (ct);

    brick_table = card_table_brick_table (ct);

#ifdef MARK_ARRAY
    if (gc_can_use_concurrent)
    {
        mark_array = translate_mark_array (card_table_mark_array (ct));
        assert (mark_word_of (g_gc_highest_address) ==
            mark_word_of (align_on_mark_word (g_gc_highest_address)));
    }
    else
        mark_array = NULL;
#endif //MARK_ARRAY

#ifdef CARD_BUNDLE
#if defined(MARK_ARRAY) && defined(_DEBUG)
    size_t cb_end = (size_t)((uint8_t*)card_table_card_bundle_table (ct) + size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address));
#ifdef GROWABLE_SEG_MAPPING_TABLE
    size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address);
    size_t cb_end_aligned = align_for_seg_mapping_table (cb_end);
    st += (cb_end_aligned - cb_end);
#else  //GROWABLE_SEG_MAPPING_TABLE
    size_t st = 0;
#endif //GROWABLE_SEG_MAPPING_TABLE
#endif //MARK_ARRAY && _DEBUG
    card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);

    // Ensure that the word that represents g_gc_lowest_address in the translated table is located at the
    // start of the untranslated table.
    assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
            card_table_card_bundle_table (ct));

    //set the card bundles if we are in a heap growth scenario
    if (card_bundles_enabled())
    {
        card_bundles_set (cardw_card_bundle (card_word (card_of (lowest_address))),
                          cardw_card_bundle (align_cardw_on_bundle (card_word (card_of (highest_address)))));
    }
    //check if we need to turn on card_bundles.
#ifdef MULTIPLE_HEAPS
    // use INT64 arithmetic here because of possible overflow on 32-bit
    uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*gc_heap::n_heaps;
#else
    // use INT64 arithmetic here because of possible overflow on 32-bit
    uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
#endif //MULTIPLE_HEAPS
    if (reserved_memory >= th)
    {
        enable_card_bundles();
    }

#endif //CARD_BUNDLE

    // for each of the segments and heaps, copy the brick table and
    // OR in the card table
    heap_segment* seg = generation_start_segment (generation_of (max_generation));
    while (seg)
    {
        if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
        {
            //check if it came into range
            if ((heap_segment_reserved (seg) > lowest_address) &&
                (heap_segment_mem (seg) < highest_address))
            {
                set_ro_segment_in_range (seg);
            }
        }
        else
        {

            uint8_t* end = align_on_page (heap_segment_allocated (seg));
            copy_brick_card_range (la, old_card_table,
                                   old_brick_table,
                                   seg,
                                   align_lower_page (heap_segment_mem (seg)),
                                   end);
        }
        seg = heap_segment_next (seg);
    }

    seg = generation_start_segment (large_object_generation);
    while (seg)
    {
        if (heap_segment_read_only_p (seg) && !heap_segment_in_range_p (seg))
        {
            //check if it came into range
            if ((heap_segment_reserved (seg) > lowest_address) &&
                (heap_segment_mem (seg) < highest_address))
            {
                set_ro_segment_in_range (seg);
            }
        }
        else
        {
            uint8_t* end = align_on_page (heap_segment_allocated (seg));
            copy_brick_card_range (la, old_card_table,
                                   0,
                                   seg,
                                   align_lower_page (heap_segment_mem (seg)),
                                   end);
        }
        seg = heap_segment_next (seg);
    }

    release_card_table (&old_card_table[card_word (card_of(la))]);
}

#ifdef FEATURE_BASICFREEZE
BOOL gc_heap::insert_ro_segment (heap_segment* seg)
{
    enter_spin_lock (&gc_heap::gc_lock);

    if (!gc_heap::seg_table->ensure_space_for_insert ()
        || (should_commit_mark_array() && !commit_mark_array_new_seg(__this, seg)))
    {
        leave_spin_lock(&gc_heap::gc_lock);
        return FALSE;
    }

    //insert at the head of the segment list
    generation* gen2 = generation_of (max_generation);
    heap_segment* oldhead = generation_start_segment (gen2);
    heap_segment_next (seg) = oldhead;
    generation_start_segment (gen2) = seg;

    seg_table->insert (heap_segment_mem(seg), (size_t)seg);

#ifdef SEG_MAPPING_TABLE
    seg_mapping_table_add_ro_segment (seg);
#endif //SEG_MAPPING_TABLE

    //test if in range
    if ((heap_segment_reserved (seg) > lowest_address) &&
        (heap_segment_mem (seg) < highest_address))
    {
        set_ro_segment_in_range (seg);
    }

    FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg), (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)), gc_etw_segment_read_only_heap);

    leave_spin_lock (&gc_heap::gc_lock);
    return TRUE;
}

// No one is calling this function right now. If this is getting called we need
// to take care of decommitting the mark array for it - we will need to remember
// which portion of the mark array was committed and only decommit that.
void gc_heap::remove_ro_segment (heap_segment* seg)
{
//clear the mark bits so that a new segment allocated in its place will have clear mark bits
#ifdef MARK_ARRAY
    if (gc_can_use_concurrent)
    {
        clear_mark_array (align_lower_mark_word (max (heap_segment_mem (seg), lowest_address)),
                      align_on_card_word (min (heap_segment_allocated (seg), highest_address)),
                      false); // read_only segments need the mark clear
    }
#endif //MARK_ARRAY

    enter_spin_lock (&gc_heap::gc_lock);

    seg_table->remove ((uint8_t*)seg);

#ifdef SEG_MAPPING_TABLE
    seg_mapping_table_remove_ro_segment (seg);
#endif //SEG_MAPPING_TABLE

    // Locate segment (and previous segment) in the list.
    generation* gen2 = generation_of (max_generation);
    heap_segment* curr_seg = generation_start_segment (gen2);
    heap_segment* prev_seg = NULL;

    while (curr_seg && curr_seg != seg)
    {
        prev_seg = curr_seg;
        curr_seg = heap_segment_next (curr_seg);
    }
    assert (curr_seg == seg);

    // Patch previous segment (or list head if there is none) to skip the removed segment.
    if (prev_seg)
        heap_segment_next (prev_seg) = heap_segment_next (curr_seg);
    else
        generation_start_segment (gen2) = heap_segment_next (curr_seg);

    leave_spin_lock (&gc_heap::gc_lock);
}
#endif //FEATURE_BASICFREEZE

BOOL gc_heap::set_ro_segment_in_range (heap_segment* seg)
{
    //set it in range
    seg->flags |= heap_segment_flags_inrange;
//    init_brick_card_range (seg);
    ro_segments_in_range = TRUE;
    //right now, segments aren't protected
    //unprotect_segment (seg);
    return TRUE;
}

#ifdef MARK_LIST

uint8_t** make_mark_list (size_t size)
{
    uint8_t** mark_list = new (nothrow) uint8_t* [size];
    return mark_list;
}

#define swap(a,b){uint8_t* t; t = a; a = b; b = t;}

void verify_qsort_array (uint8_t* *low, uint8_t* *high)
{
    uint8_t **i = 0;

    for (i = low+1; i <= high; i++)
    {
        if (*i < *(i-1))
        {
            FATAL_GC_ERROR();
        }
    }
}

#ifndef USE_INTROSORT
void qsort1( uint8_t* *low, uint8_t* *high, unsigned int depth)
{
    if (((low + 16) >= high) || (depth > 100))
    {
        //insertion sort
        uint8_t **i, **j;
        for (i = low+1; i <= high; i++)
        {
            uint8_t* val = *i;
            for (j=i;j >low && val<*(j-1);j--)
            {
                *j=*(j-1);
            }
            *j=val;
        }
    }
    else
    {
        uint8_t *pivot, **left, **right;

        //sort low middle and high
        if (*(low+((high-low)/2)) < *low)
            swap (*(low+((high-low)/2)), *low);
        if (*high < *low)
            swap (*low, *high);
        if (*high < *(low+((high-low)/2)))
            swap (*(low+((high-low)/2)), *high);

        swap (*(low+((high-low)/2)), *(high-1));
        pivot =  *(high-1);
        left = low; right = high-1;
        while (1) {
            while (*(--right) > pivot);
            while (*(++left)  < pivot);
            if (left < right)
            {
                swap(*left, *right);
            }
            else
                break;
        }
        swap (*left, *(high-1));
        qsort1(low, left-1, depth+1);
        qsort1(left+1, high, depth+1);
    }
}
#endif //USE_INTROSORT
void rqsort1( uint8_t* *low, uint8_t* *high)
{
    if ((low + 16) >= high)
    {
        //insertion sort
        uint8_t **i, **j;
        for (i = low+1; i <= high; i++)
        {
            uint8_t* val = *i;
            for (j=i;j >low && val>*(j-1);j--)
            {
                *j=*(j-1);
            }
            *j=val;
        }
    }
    else
    {
        uint8_t *pivot, **left, **right;

        //sort low middle and high
        if (*(low+((high-low)/2)) > *low)
            swap (*(low+((high-low)/2)), *low);
        if (*high > *low)
            swap (*low, *high);
        if (*high > *(low+((high-low)/2)))
            swap (*(low+((high-low)/2)), *high);

        swap (*(low+((high-low)/2)), *(high-1));
        pivot =  *(high-1);
        left = low; right = high-1;
        while (1) {
            while (*(--right) < pivot);
            while (*(++left)  > pivot);
            if (left < right)
            {
                swap(*left, *right);
            }
            else
                break;
        }
        swap (*left, *(high-1));
        rqsort1(low, left-1);
        rqsort1(left+1, high);
    }
}

#ifdef USE_INTROSORT
class introsort
{

private:
    static const int size_threshold = 64;
    static const int max_depth = 100;


inline static void swap_elements(uint8_t** i,uint8_t** j)
    {
        uint8_t* t=*i;
        *i=*j;
        *j=t;
    }

public:
    static void sort (uint8_t** begin, uint8_t** end, int ignored)
    {
        ignored = 0;
        introsort_loop (begin, end, max_depth);
        insertionsort (begin, end);
    }

private:

    static void introsort_loop (uint8_t** lo, uint8_t** hi, int depth_limit)
    {
        while (hi-lo >= size_threshold)
        {
            if (depth_limit == 0)
            {
                heapsort (lo, hi);
                return;
            }
            uint8_t** p=median_partition (lo, hi);
            depth_limit=depth_limit-1;
            introsort_loop (p, hi, depth_limit);
            hi=p-1;
        }
    }

    static uint8_t** median_partition (uint8_t** low, uint8_t** high)
    {
        uint8_t *pivot, **left, **right;

        //sort low middle and high
        if (*(low+((high-low)/2)) < *low)
            swap_elements ((low+((high-low)/2)), low);
        if (*high < *low)
            swap_elements (low, high);
        if (*high < *(low+((high-low)/2)))
            swap_elements ((low+((high-low)/2)), high);

        swap_elements ((low+((high-low)/2)), (high-1));
        pivot =  *(high-1);
        left = low; right = high-1;
        while (1) {
            while (*(--right) > pivot);
            while (*(++left)  < pivot);
            if (left < right)
            {
                swap_elements(left, right);
            }
            else
                break;
        }
        swap_elements (left, (high-1));
        return left;
    }


    static void insertionsort (uint8_t** lo, uint8_t** hi)
    {
        for (uint8_t** i=lo+1; i <= hi; i++)
        {
            uint8_t** j = i;
            uint8_t* t = *i;
            while((j > lo) && (t <*(j-1)))
            {
                *j = *(j-1);
                j--;
            }
            *j = t;
        }
    }

    static void heapsort (uint8_t** lo, uint8_t** hi)
    {
        size_t n = hi - lo + 1;
        for (size_t i=n / 2; i >= 1; i--)
        {
            downheap (i,n,lo);
        }
        for (size_t i = n; i > 1; i--)
        {
            swap_elements (lo, lo + i - 1);
            downheap(1, i - 1,  lo);
        }
    }

    static void downheap (size_t i, size_t n, uint8_t** lo)
    {
        uint8_t* d = *(lo + i - 1);
        size_t child;
        while (i <= n / 2)
        {
            child = 2*i;
            if (child < n && *(lo + child - 1)<(*(lo + child)))
            {
                child++;
            }
            if (!(d<*(lo + child - 1)))
            {
                break;
            }
            *(lo + i - 1) = *(lo + child - 1);
            i = child;
        }
        *(lo + i - 1) = d;
    }

};

#endif //USE_INTROSORT
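
// Usage sketch (illustrative): the sorters above all take inclusive
// [low, high] pointer bounds into a mark list, matching the call shape in
// sort_mark_list below:
//
//     if (mark_list_index > mark_list)
//         introsort::sort (mark_list, mark_list_index - 1, 0); // 3rd arg is ignored
//
// The _sort name used by the callers is mapped onto one of these
// implementations elsewhere in this file, depending on USE_INTROSORT.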

#ifdef MULTIPLE_HEAPS
#ifdef PARALLEL_MARK_LIST_SORT
void gc_heap::sort_mark_list()
{
    // if this heap had a mark list overflow, we don't do anything
    if (mark_list_index > mark_list_end)
    {
//        printf("sort_mark_list: overflow on heap %d\n", heap_number);
        return;
    }

    // if any other heap had a mark list overflow, we fake one too,
    // so we don't use an incomplete mark list by mistake
    for (int i = 0; i < n_heaps; i++)
    {
        if (g_heaps[i]->mark_list_index > g_heaps[i]->mark_list_end)
        {
            mark_list_index = mark_list_end + 1;
//            printf("sort_mark_list: overflow on heap %d\n", i);
            return;
        }
    }

//    unsigned long start = GetCycleCount32();

    dprintf (3, ("Sorting mark lists"));
    if (mark_list_index > mark_list)
        _sort (mark_list, mark_list_index - 1, 0);

//    printf("first phase of sort_mark_list for heap %d took %u cycles to sort %u entries\n", this->heap_number, GetCycleCount32() - start, mark_list_index - mark_list);
//    start = GetCycleCount32();

    // first set the pieces for all heaps to empty
    int heap_num;
    for (heap_num = 0; heap_num < n_heaps; heap_num++)
    {
        mark_list_piece_start[heap_num] = NULL;
        mark_list_piece_end[heap_num] = NULL;
    }

    uint8_t** x = mark_list;

// predicate means: x is still within the mark list, and within the bounds of this heap
#define predicate(x) (((x) < mark_list_index) && (*(x) < heap->ephemeral_high))

    heap_num = -1;
    while (x < mark_list_index)
    {
        gc_heap* heap;
        // find the heap x points into - searching cyclically from the last heap,
        // because in many cases the right heap is the next one or comes soon after
        int last_heap_num = heap_num;
        MAYBE_UNUSED_VAR(last_heap_num);
        do
        {
            heap_num++;
            if (heap_num >= n_heaps)
                heap_num = 0;
            assert(heap_num != last_heap_num); // we should always find the heap - infinite loop if not!
            heap = g_heaps[heap_num];
        }
        while (!(*x >= heap->ephemeral_low && *x < heap->ephemeral_high));

        // x is the start of the mark list piece for this heap
        mark_list_piece_start[heap_num] = x;

        // to find the end of the mark list piece for this heap, find the first x
        // that has !predicate(x), i.e. that is either not in this heap, or beyond the end of the list
        if (predicate(x))
        {
            // let's see if we get lucky and the whole rest belongs to this piece
            if (predicate(mark_list_index-1))
            {
                x = mark_list_index;
                mark_list_piece_end[heap_num] = x;
                break;
            }

            // we play a variant of binary search to find the point sooner.
            // the first loop advances by increasing steps until the predicate turns false.
            // then we retreat the last step, and the second loop advances by decreasing steps, keeping the predicate true.
            unsigned inc = 1;
            do
            {
                inc *= 2;
                uint8_t** temp_x = x;
                x += inc;
                if (temp_x > x)
                {
                    break;
                }
            }
            while (predicate(x));
            // we know that only the last step was wrong, so we undo it
            x -= inc;
            do
            {
                // loop invariant - predicate holds at x, but not x + inc
                assert (predicate(x) && !(((x + inc) > x) && predicate(x + inc)));
                inc /= 2;
                if (((x + inc) > x) && predicate(x + inc))
                {
                    x += inc;
                }
            }
            while (inc > 1);
            // the termination condition and the loop invariant together imply this:
            assert(predicate(x) && !predicate(x + inc) && (inc == 1));
            // so the spot we're looking for is one further
            x += 1;
        }
        mark_list_piece_end[heap_num] = x;
    }

#undef predicate

//    printf("second phase of sort_mark_list for heap %d took %u cycles\n", this->heap_number, GetCycleCount32() - start);
}
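
// Standalone sketch of the search used in the loop above (exponential probing
// followed by binary refinement), written against a hypothetical pred() for
// clarity; the real code additionally guards against pointer overflow when
// stepping forward:
//
//     // precondition: pred (x) is true on entry and turns false before x + inc overflows
//     size_t inc = 1;
//     do { inc *= 2; } while (pred (x + inc));   // overshoot with doubling steps
//     // invariant from here on: pred (x) is true, pred (x + inc) is false
//     for (inc /= 2; inc >= 1; inc /= 2)
//         if (pred (x + inc))
//             x += inc;                          // move right while pred still holds
//     // now pred (x) is true and pred (x + 1) is false, so the boundary is x + 1
//     x += 1;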

void gc_heap::append_to_mark_list(uint8_t **start, uint8_t **end)
{
    size_t slots_needed = end - start;
    size_t slots_available = mark_list_end + 1 - mark_list_index;
    size_t slots_to_copy = min(slots_needed, slots_available);
    memcpy(mark_list_index, start, slots_to_copy*sizeof(*start));
    mark_list_index += slots_to_copy;
//    printf("heap %d: appended %Id slots to mark_list\n", heap_number, slots_to_copy);
}

void gc_heap::merge_mark_lists()
{
    uint8_t** source[MAX_SUPPORTED_CPUS];
    uint8_t** source_end[MAX_SUPPORTED_CPUS];
    int source_heap[MAX_SUPPORTED_CPUS];
    int source_count = 0;

    // in case of mark list overflow, don't bother
    if (mark_list_index >  mark_list_end)
    {
//        printf("merge_mark_lists: overflow\n");
        return;
    }

    dprintf(3, ("merge_mark_lists: heap_number = %d  starts out with %Id entries", heap_number, mark_list_index - mark_list));
//    unsigned long start = GetCycleCount32();
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap* heap = g_heaps[i];
        if (heap->mark_list_piece_start[heap_number] < heap->mark_list_piece_end[heap_number])
        {
            source[source_count] = heap->mark_list_piece_start[heap_number];
            source_end[source_count] = heap->mark_list_piece_end[heap_number];
            source_heap[source_count] = i;
            if (source_count < MAX_SUPPORTED_CPUS)
                source_count++;
        }
    }
//    printf("first phase of merge_mark_lists for heap %d took %u cycles\n", heap_number, GetCycleCount32() - start);

    dprintf(3, ("heap_number = %d  has %d sources\n", heap_number, source_count));
#if defined(_DEBUG) || defined(TRACE_GC)
    for (int j = 0; j < source_count; j++)
    {
        dprintf(3, ("heap_number = %d  ", heap_number));
        dprintf(3, (" source from heap %d = %Ix .. %Ix (%Id entries)",
            (size_t)(source_heap[j]), (size_t)(source[j][0]), (size_t)(source_end[j][-1]), (size_t)(source_end[j] - source[j])));
        // the sources should all be sorted
        for (uint8_t **x = source[j]; x < source_end[j] - 1; x++)
        {
            if (x[0] > x[1])
            {
                dprintf(3, ("oops, mark_list from source %d for heap %d isn't sorted\n", j, heap_number));
                assert (0);
            }
        }
    }
#endif //_DEBUG || TRACE_GC

//    start = GetCycleCount32();

    mark_list = &g_mark_list_copy [heap_number*mark_list_size];
    mark_list_index = mark_list;
    mark_list_end = &mark_list [mark_list_size-1];
    int piece_count = 0;
    if (source_count == 0)
    {
        ; // nothing to do
    }
    else if (source_count == 1)
    {
        mark_list = source[0];
        mark_list_index = source_end[0];
        mark_list_end = mark_list_index;
        piece_count++;
    }
    else
    {
        while (source_count > 1)
        {
            // find the lowest and second lowest value in the sources we're merging from
            int lowest_source = 0;
            uint8_t *lowest = *source[0];
            uint8_t *second_lowest = *source[1];
            for (int i = 1; i < source_count; i++)
            {
                if (lowest > *source[i])
                {
                    second_lowest = lowest;
                    lowest = *source[i];
                    lowest_source = i;
                }
                else if (second_lowest > *source[i])
                {
                    second_lowest = *source[i];
                }
            }

            // find the point in the lowest source where it either runs out or is not <= second_lowest anymore

            // let's first try to get lucky and see if the whole source is <= second_lowest -- this is actually quite common
            uint8_t **x;
            if (source_end[lowest_source][-1] <= second_lowest)
                x = source_end[lowest_source];
            else
            {
                // use linear search to find the end -- could also use binary search as in sort_mark_list,
                // but saw no improvement doing that
                for (x = source[lowest_source]; x < source_end[lowest_source] && *x <= second_lowest; x++)
                    ;
            }

            // blast this piece to the mark list
            append_to_mark_list(source[lowest_source], x);
            piece_count++;

            source[lowest_source] = x;

            // check whether this source is now exhausted
            if (x >= source_end[lowest_source])
            {
                // if it's not the source with the highest index, copy the source with the highest index
                // over it so the non-empty sources are always at the beginning
                if (lowest_source < source_count-1)
                {
                    source[lowest_source] = source[source_count-1];
                    source_end[lowest_source] = source_end[source_count-1];
                }
                source_count--;
            }
        }
        // we're left with just one source that we copy
        append_to_mark_list(source[0], source_end[0]);
        piece_count++;
    }

//    printf("second phase of merge_mark_lists for heap %d took %u cycles to merge %d pieces\n", heap_number, GetCycleCount32() - start, piece_count);

#if defined(_DEBUG) || defined(TRACE_GC)
    // the final mark list must be sorted
    for (uint8_t **x = mark_list; x < mark_list_index - 1; x++)
    {
        if (x[0] > x[1])
        {
            dprintf(3, ("oops, mark_list for heap %d isn't sorted at the end of merge_mark_lists", heap_number));
            assert (0);
        }
    }
#endif //defined(_DEBUG) || defined(TRACE_GC)
}
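
// Illustrative note on why the merge above produces a sorted list: each round
// copies, from the source with the smallest head, only the prefix whose
// values are <= the second-smallest head, so nothing appended in a later
// round can be smaller than anything appended in this one. For two sorted
// sources
//
//     A: 1 3 4 9        B: 2 6 7
//
// the rounds copy {1} from A (<= 2), {2} from B (<= 3), {3, 4} from A (<= 6),
// {6, 7} from B (<= 9), and the loop exit then appends A's remaining {9}.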
#else //PARALLEL_MARK_LIST_SORT
void gc_heap::combine_mark_lists()
{
    dprintf (3, ("Combining mark lists"));
    //check whether any heap has overflowed its mark list
    BOOL use_mark_list = TRUE;
    for (int i = 0; i < n_heaps; i++)
    {
        if (g_heaps [i]->mark_list_index >  g_heaps [i]->mark_list_end)
        {
            use_mark_list = FALSE;
            break;
        }
    }

    if (use_mark_list)
    {
        dprintf (3, ("Using mark list"));
        //compact the gaps out of the mark list
        int gn = 0;
        uint8_t** current_gap = g_heaps [gn]->mark_list_index;
        uint8_t** current_gap_end = g_heaps[gn]->mark_list_end + 1;
        uint8_t** dst_last = current_gap-1;

        int srcn = n_heaps-1;
        gc_heap* srch = g_heaps [srcn];
        uint8_t** src = srch->mark_list_index - 1;
        uint8_t** src_beg = srch->mark_list;

        while (current_gap <= src)
        {
            while ((gn < n_heaps-1) && (current_gap >= current_gap_end))
            {
                //go to the next gap
                gn++;
                dprintf (3, ("Going to the next gap %d", gn));
                assert (gn < n_heaps);
                current_gap = g_heaps [gn]->mark_list_index;
                current_gap_end = g_heaps[gn]->mark_list_end + 1;
                assert ((gn == (n_heaps-1)) || (current_gap_end == g_heaps[gn+1]->mark_list));
            }
            while ((srcn > 0) && (src < src_beg))
            {
                //go to the previous source
                srcn--;
                dprintf (3, ("going to the previous source %d", srcn));
                assert (srcn>=0);
                gc_heap* srch = g_heaps [srcn];
                src = srch->mark_list_index - 1;
                src_beg = srch->mark_list;
            }
            if (current_gap < src)
            {
                dst_last = current_gap;
                *current_gap++ = *src--;
            }
        }
        dprintf (3, ("src: %Ix dst_last: %Ix", (size_t)src, (size_t)dst_last));

        uint8_t** end_of_list = max (src, dst_last);

        //sort the resulting compacted list
        assert (end_of_list < &g_mark_list [n_heaps*mark_list_size]);
        if (end_of_list > &g_mark_list[0])
            _sort (&g_mark_list[0], end_of_list, 0);
        //adjust the mark_list to the beginning of the resulting mark list.
        for (int i = 0; i < n_heaps; i++)
        {
            g_heaps [i]->mark_list = g_mark_list;
            g_heaps [i]->mark_list_index = end_of_list + 1;
            g_heaps [i]->mark_list_end = end_of_list + 1;
        }
    }
    else
    {
        uint8_t** end_of_list = g_mark_list;
        //adjust the mark_list to the beginning of the resulting mark list.
        //put the index beyond the end to turn off mark list processing
        for (int i = 0; i < n_heaps; i++)
        {
            g_heaps [i]->mark_list = g_mark_list;
            g_heaps [i]->mark_list_index = end_of_list + 1;
            g_heaps [i]->mark_list_end = end_of_list;
        }
    }
}
#endif // PARALLEL_MARK_LIST_SORT
#endif //MULTIPLE_HEAPS
#endif //MARK_LIST

class seg_free_spaces
{
    struct seg_free_space
    {
        BOOL is_plug;
        void* start;
    };

    struct free_space_bucket
    {
        seg_free_space* free_space;
        ptrdiff_t count_add; // Assigned when we first construct the array.
        ptrdiff_t count_fit; // How many items left when we are fitting plugs.
    };

    void move_bucket (int old_power2, int new_power2)
    {
        // PREFAST warning 22015: old_power2 could be negative
        assert (old_power2 >= 0);
        assert (old_power2 >= new_power2);

        if (old_power2 == new_power2)
        {
            return;
        }

        seg_free_space* src_index = free_space_buckets[old_power2].free_space;
        for (int i = old_power2; i > new_power2; i--)
        {
            seg_free_space** dest = &(free_space_buckets[i].free_space);
            (*dest)++;

            seg_free_space* dest_index = free_space_buckets[i - 1].free_space;
            if (i > (new_power2 + 1))
            {
                seg_free_space temp = *src_index;
                *src_index = *dest_index;
                *dest_index = temp;
            }
            src_index = dest_index;
        }

        free_space_buckets[old_power2].count_fit--;
        free_space_buckets[new_power2].count_fit++;
    }

#ifdef _DEBUG

    void dump_free_space (seg_free_space* item)
    {
        uint8_t* addr = 0;
        size_t len = 0;

        if (item->is_plug)
        {
            mark* m = (mark*)(item->start);
            len = pinned_len (m);
            addr = pinned_plug (m) - len;
        }
        else
        {
            heap_segment* seg = (heap_segment*)(item->start);
            addr = heap_segment_plan_allocated (seg);
            len = heap_segment_committed (seg) - addr;
        }

        dprintf (SEG_REUSE_LOG_1, ("[%d]0x%Ix %Id", heap_num, addr, len));
    }

    void dump()
    {
        seg_free_space* item = NULL;
        int i = 0;

        dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------\nnow the free spaces look like:", heap_num));
        for (i = 0; i < (free_space_bucket_count - 1); i++)
        {
            dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
            dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
            item = free_space_buckets[i].free_space;
            while (item < free_space_buckets[i + 1].free_space)
            {
                dump_free_space (item);
                item++;
            }
            dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
        }

        dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces for 2^%d bucket:", heap_num, (base_power2 + i)));
        dprintf (SEG_REUSE_LOG_1, ("[%d]%s %s", heap_num, "start", "len"));
        item = free_space_buckets[i].free_space;

        while (item <= &seg_free_space_array[free_space_item_count - 1])
        {
            dump_free_space (item);
            item++;
        }
        dprintf (SEG_REUSE_LOG_1, ("[%d]----------------------------------", heap_num));
    }

#endif //_DEBUG

    free_space_bucket* free_space_buckets;
    seg_free_space* seg_free_space_array;
    ptrdiff_t free_space_bucket_count;
    ptrdiff_t free_space_item_count;
    int base_power2;
    int heap_num;
#ifdef _DEBUG
    BOOL has_end_of_seg;
#endif //_DEBUG

public:

    seg_free_spaces (int h_number)
    {
        heap_num = h_number;
    }

    BOOL alloc ()
    {
        size_t total_prealloc_size =
            MAX_NUM_BUCKETS * sizeof (free_space_bucket) +
            MAX_NUM_FREE_SPACES * sizeof (seg_free_space);

        free_space_buckets = (free_space_bucket*) new (nothrow) uint8_t[total_prealloc_size];

        return (!!free_space_buckets);
    }

    // We take the ordered free space array we got from the 1st pass
    // and feed the portion that we decided to use to this method, i.e.,
    // the largest item_count free spaces.
    void add_buckets (int base, size_t* ordered_free_spaces, int bucket_count, size_t item_count)
    {
        assert (free_space_buckets);
        assert (item_count <= (size_t)MAX_PTR);

        free_space_bucket_count = bucket_count;
        free_space_item_count = item_count;
        base_power2 = base;
#ifdef _DEBUG
        has_end_of_seg = FALSE;
#endif //_DEBUG

        ptrdiff_t total_item_count = 0;
        ptrdiff_t i = 0;

        seg_free_space_array = (seg_free_space*)(free_space_buckets + free_space_bucket_count);

        for (i = 0; i < (ptrdiff_t)item_count; i++)
        {
            seg_free_space_array[i].start = 0;
            seg_free_space_array[i].is_plug = FALSE;
        }

        for (i = 0; i < bucket_count; i++)
        {
            free_space_buckets[i].count_add = ordered_free_spaces[i];
            free_space_buckets[i].count_fit = ordered_free_spaces[i];
            free_space_buckets[i].free_space = &seg_free_space_array[total_item_count];
            total_item_count += free_space_buckets[i].count_add;
        }

        assert (total_item_count == (ptrdiff_t)item_count);
    }

    // If we are adding a free space before a plug, we pass the
    // mark stack position so we can update its length; we could
    // also be adding the free space after the last plug, in which
    // case start is the segment whose heap_segment_plan_allocated
    // we'll need to update.
    void add (void* start, BOOL plug_p, BOOL first_p)
    {
        size_t size = (plug_p ?
                       pinned_len ((mark*)start) :
                       (heap_segment_committed ((heap_segment*)start) -
                           heap_segment_plan_allocated ((heap_segment*)start)));

        if (plug_p)
        {
            dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space before plug: %Id", heap_num, size));
        }
        else
        {
            dprintf (SEG_REUSE_LOG_1, ("[%d]Adding a free space at end of seg: %Id", heap_num, size));
#ifdef _DEBUG
            has_end_of_seg = TRUE;
#endif //_DEBUG
        }

        if (first_p)
        {
            size_t eph_gen_starts = gc_heap::eph_gen_starts_size;
            size -= eph_gen_starts;
            if (plug_p)
            {
                mark* m = (mark*)(start);
                pinned_len (m) -= eph_gen_starts;
            }
            else
            {
                heap_segment* seg = (heap_segment*)start;
                heap_segment_plan_allocated (seg) += eph_gen_starts;
            }
        }

        int bucket_power2 = index_of_highest_set_bit (size);
        if (bucket_power2 < base_power2)
        {
            return;
        }

        free_space_bucket* bucket = &free_space_buckets[bucket_power2 - base_power2];

        seg_free_space* bucket_free_space = bucket->free_space;
        assert (plug_p || (!plug_p && bucket->count_add));

        if (bucket->count_add == 0)
        {
            dprintf (SEG_REUSE_LOG_1, ("[%d]Already have enough of 2^%d", heap_num, bucket_power2));
            return;
        }

        ptrdiff_t index = bucket->count_add - 1;

        dprintf (SEG_REUSE_LOG_1, ("[%d]Building free spaces: adding %Ix; len: %Id (2^%d)",
                    heap_num,
                    (plug_p ?
                        (pinned_plug ((mark*)start) - pinned_len ((mark*)start)) :
                        heap_segment_plan_allocated ((heap_segment*)start)),
                    size,
                    bucket_power2));

        if (plug_p)
        {
            bucket_free_space[index].is_plug = TRUE;
        }

        bucket_free_space[index].start = start;
        bucket->count_add--;
    }
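
    // Illustrative sketch of how add() maps a free space to a bucket; the
    // numbers below are made up and the code is commented out (it is not part
    // of the build).
    //
    //   size_t size = 0x2800;                                 // a 10KB free space
    //   int bucket_power2 = index_of_highest_set_bit (size);  // 13, since 0x2000 <= 0x2800 < 0x4000
    //   // with base_power2 == 12 this lands in free_space_buckets[13 - 12],
    //   // ie the bucket holding free spaces whose highest set bit is 13.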

#ifdef _DEBUG

    // Do a consistency check after all free spaces are added.
    void check()
    {
        ptrdiff_t i = 0;
        int end_of_seg_count = 0;

        for (i = 0; i < free_space_item_count; i++)
        {
            assert (seg_free_space_array[i].start);
            if (!(seg_free_space_array[i].is_plug))
            {
                end_of_seg_count++;
            }
        }

        if (has_end_of_seg)
        {
            assert (end_of_seg_count == 1);
        }
        else
        {
            assert (end_of_seg_count == 0);
        }

        for (i = 0; i < free_space_bucket_count; i++)
        {
            assert (free_space_buckets[i].count_add == 0);
        }
    }

#endif //_DEBUG

    uint8_t* fit (uint8_t* old_loc,
               size_t plug_size
               REQD_ALIGN_AND_OFFSET_DCL)
    {
        if (old_loc)
        {
#ifdef SHORT_PLUGS
            assert (!is_plug_padded (old_loc));
#endif //SHORT_PLUGS
            assert (!node_realigned (old_loc));
        }

        size_t saved_plug_size = plug_size;

#ifdef FEATURE_STRUCTALIGN
        // BARTOKTODO (4841): this code path is disabled (see can_fit_all_blocks_p) until we take alignment requirements into account
        _ASSERTE(requiredAlignment == DATA_ALIGNMENT && false);
#endif // FEATURE_STRUCTALIGN
        // TODO: this is also not large alignment ready. We would need to consider alignment when choosing
        // the bucket.

        size_t plug_size_to_fit = plug_size;

        // Best fit is only done for gen1 to gen2 and we do not pad in gen2.
        // However, we must account for the requirements of large alignment,
        // which may result in realignment padding.
#ifdef RESPECT_LARGE_ALIGNMENT
        plug_size_to_fit += switch_alignment_size(FALSE);
#endif //RESPECT_LARGE_ALIGNMENT

        int plug_power2 = index_of_highest_set_bit (round_up_power2 (plug_size_to_fit + Align(min_obj_size)));
        ptrdiff_t i;
        uint8_t* new_address = 0;

        if (plug_power2 < base_power2)
        {
            plug_power2 = base_power2;
        }

        int chosen_power2 = plug_power2 - base_power2;
retry:
        for (i = chosen_power2; i < free_space_bucket_count; i++)
        {
            if (free_space_buckets[i].count_fit != 0)
            {
                break;
            }
            chosen_power2++;
        }

        dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting plug len %Id (2^%d) using 2^%d free space",
            heap_num,
            plug_size,
            plug_power2,
            (chosen_power2 + base_power2)));

        assert (i < free_space_bucket_count);

        seg_free_space* bucket_free_space = free_space_buckets[chosen_power2].free_space;
        ptrdiff_t free_space_count = free_space_buckets[chosen_power2].count_fit;
        size_t new_free_space_size = 0;
        BOOL can_fit = FALSE;
        size_t pad = 0;

        for (i = 0; i < free_space_count; i++)
        {
            size_t free_space_size = 0;
            pad = 0;

            BOOL realign_padding_p = FALSE;

            if (bucket_free_space[i].is_plug)
            {
                mark* m = (mark*)(bucket_free_space[i].start);
                uint8_t* plug_free_space_start = pinned_plug (m) - pinned_len (m);

                if (!((old_loc == 0) || same_large_alignment_p (old_loc, plug_free_space_start)))
                {
                    pad += switch_alignment_size (FALSE);
                    realign_padding_p = TRUE;
                }

                plug_size = saved_plug_size + pad;

                free_space_size = pinned_len (m);
                new_address = pinned_plug (m) - pinned_len (m);

                if (free_space_size >= (plug_size + Align (min_obj_size)) ||
                    free_space_size == plug_size)
                {
                    new_free_space_size = free_space_size - plug_size;
                    pinned_len (m) = new_free_space_size;
#ifdef SIMPLE_DPRINTF
                    dprintf (SEG_REUSE_LOG_0, ("[%d]FP: 0x%Ix->0x%Ix(%Ix)(%Ix), [0x%Ix (2^%d) -> [0x%Ix (2^%d)",
                                heap_num,
                                old_loc,
                                new_address,
                                (plug_size - pad),
                                pad,
                                pinned_plug (m),
                                index_of_highest_set_bit (free_space_size),
                                (pinned_plug (m) - pinned_len (m)),
                                index_of_highest_set_bit (new_free_space_size)));
#endif //SIMPLE_DPRINTF

                    if (realign_padding_p)
                    {
                        set_node_realigned (old_loc);
                    }

                    can_fit = TRUE;
                }
            }
            else
            {
                heap_segment* seg = (heap_segment*)(bucket_free_space[i].start);
                free_space_size = heap_segment_committed (seg) - heap_segment_plan_allocated (seg);

                if (!((old_loc == 0) || same_large_alignment_p (old_loc, heap_segment_plan_allocated (seg))))
                {
                    pad = switch_alignment_size (FALSE);
                    realign_padding_p = TRUE;
                }

                plug_size = saved_plug_size + pad;

                if (free_space_size >= (plug_size + Align (min_obj_size)) ||
                    free_space_size == plug_size)
                {
                    new_address = heap_segment_plan_allocated (seg);
                    new_free_space_size = free_space_size - plug_size;
                    heap_segment_plan_allocated (seg) = new_address + plug_size;
#ifdef SIMPLE_DPRINTF
                    dprintf (SEG_REUSE_LOG_0, ("[%d]FS: 0x%Ix-> 0x%Ix(%Ix) (2^%d) -> 0x%Ix (2^%d)",
                                heap_num,
                                old_loc,
                                new_address,
                                (plug_size - pad),
                                index_of_highest_set_bit (free_space_size),
                                heap_segment_plan_allocated (seg),
                                index_of_highest_set_bit (new_free_space_size)));
#endif //SIMPLE_DPRINTF

                    if (realign_padding_p)
                        set_node_realigned (old_loc);

                    can_fit = TRUE;
                }
            }

            if (can_fit)
            {
                break;
            }
        }

        if (!can_fit)
        {
            assert (chosen_power2 == 0);
            chosen_power2 = 1;
            goto retry;
        }
        else
        {
            if (pad)
            {
                new_address += pad;
            }
            assert ((chosen_power2 && (i == 0)) ||
                    ((!chosen_power2) && (i < free_space_count)));
        }

        int new_bucket_power2 = index_of_highest_set_bit (new_free_space_size);

        if (new_bucket_power2 < base_power2)
        {
            new_bucket_power2 = base_power2;
        }

        move_bucket (chosen_power2, new_bucket_power2 - base_power2);

        //dump();

        return new_address;
    }
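
    // Illustrative sketch of how fit() picks its starting bucket; the plug size
    // below is made up and the code is commented out (not part of the build).
    //
    //   size_t plug_size_to_fit = 0x1200;                 // ~4.5KB plug
    //   int plug_power2 = index_of_highest_set_bit (
    //       round_up_power2 (plug_size_to_fit + Align (min_obj_size)));  // 13
    //   // the search starts at bucket (13 - base_power2); every free space there
    //   // is at least 2^13 bytes, but each candidate is still re-checked because
    //   // realignment padding may grow the plug and any leftover gap must be
    //   // either 0 or at least Align (min_obj_size).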

    void cleanup ()
    {
        if (free_space_buckets)
        {
            delete [] free_space_buckets;
        }
        if (seg_free_space_array)
        {
            delete [] seg_free_space_array;
        }
    }
};


#define marked(i) header(i)->IsMarked()
#define set_marked(i) header(i)->SetMarked()
#define clear_marked(i) header(i)->ClearMarked()
#define pinned(i) header(i)->IsPinned()
#define set_pinned(i) header(i)->SetPinned()
#define clear_pinned(i) header(i)->GetHeader()->ClrGCBit();

inline size_t my_get_size (Object* ob)
{
    MethodTable* mT = header(ob)->GetMethodTable();

    return (mT->GetBaseSize() +
            (mT->HasComponentSize() ?
             ((size_t)((CObjectHeader*)ob)->GetNumComponents() * mT->RawGetComponentSize()) : 0));
}

//#define size(i) header(i)->GetSize()
#define size(i) my_get_size (header(i))

#define contain_pointers(i) header(i)->ContainsPointers()
#ifdef COLLECTIBLE_CLASS
#define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()

#define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
#define is_collectible(i) method_table(i)->Collectible()
#else //COLLECTIBLE_CLASS
#define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
#endif //COLLECTIBLE_CLASS

#if defined (MARK_ARRAY) && defined (BACKGROUND_GC)
inline
void gc_heap::seg_clear_mark_array_bits_soh (heap_segment* seg)
{
    uint8_t* range_beg = 0;
    uint8_t* range_end = 0;
    if (bgc_mark_array_range (seg, FALSE, &range_beg, &range_end))
    {
        clear_mark_array (range_beg, align_on_mark_word (range_end), FALSE
#ifdef FEATURE_BASICFREEZE
            , TRUE
#endif // FEATURE_BASICFREEZE
            );
    }
}

void gc_heap::clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
{
    if ((start < background_saved_highest_address) &&
        (end > background_saved_lowest_address))
    {
        start = max (start, background_saved_lowest_address);
        end = min (end, background_saved_highest_address);

        size_t start_mark_bit = mark_bit_of (start);
        size_t end_mark_bit = mark_bit_of (end);
        unsigned int startbit = mark_bit_bit (start_mark_bit);
        unsigned int endbit = mark_bit_bit (end_mark_bit);
        size_t startwrd = mark_bit_word (start_mark_bit);
        size_t endwrd = mark_bit_word (end_mark_bit);

        dprintf (3, ("Clearing all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
            (size_t)start, (size_t)start_mark_bit,
            (size_t)end, (size_t)end_mark_bit));

        unsigned int firstwrd = lowbits (~0, startbit);
        unsigned int lastwrd = highbits (~0, endbit);

        if (startwrd == endwrd)
        {
            unsigned int wrd = firstwrd | lastwrd;
            mark_array[startwrd] &= wrd;
            return;
        }

        // clear the first mark word.
        if (startbit)
        {
            mark_array[startwrd] &= firstwrd;
            startwrd++;
        }

        for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
        {
            mark_array[wrdtmp] = 0;
        }

        // clear the last mark word.
        if (endbit)
        {
            mark_array[endwrd] &= lastwrd;
        }
    }
}
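
// Illustrative sketch of the first/last word masking above, assuming the usual
// lowbits/highbits mask macros defined earlier in this file and 32-bit mark
// words; the bit positions are made up and the code is commented out.
//
//   // startbit == 3, endbit == 5:
//   unsigned int firstwrd = lowbits (~0, 3);   // 0x00000007 - keeps bits 0..2
//   unsigned int lastwrd  = highbits (~0, 5);  // 0xFFFFFFE0 - keeps bits 5..31
//   // ANDing the boundary mark words with these masks clears exactly the bits
//   // that fall inside [start, end) while preserving the neighboring bits.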

void gc_heap::bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end)
{
    if ((start < background_saved_highest_address) &&
        (end > background_saved_lowest_address))
    {
        start = max (start, background_saved_lowest_address);
        end = min (end, background_saved_highest_address);

        clear_batch_mark_array_bits (start, end);
    }
}

void gc_heap::clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p)
{
    dprintf (3, ("clearing mark array bits by objects for addr [%Ix,[%Ix",
                  from, end));
    int align_const = get_alignment_constant (!loh_p);

    uint8_t* o = from;

    while (o < end)
    {
        uint8_t*  next_o = o + Align (size (o), align_const);

        if (background_object_marked (o, TRUE))
        {
            dprintf (3, ("%Ix was marked by bgc, is now cleared", o));
        }

        o = next_o;
    }
}
#endif //MARK_ARRAY && BACKGROUND_GC

inline
BOOL gc_heap::is_mark_set (uint8_t* o)
{
    return marked (o);
}

#if defined (_MSC_VER) && defined (_TARGET_X86_)
#pragma optimize("y", on)        // Small critical routines, don't put in EBP frame
#endif //_MSC_VER && _TARGET_X86_

// return the generation number of an object.
// It is assumed that the object is valid.
//Note that this will return max_generation for a LOH object
int gc_heap::object_gennum (uint8_t* o)
{
    if (in_range_for_segment (o, ephemeral_heap_segment) &&
        (o >= generation_allocation_start (generation_of (max_generation-1))))
    {
        // in an ephemeral generation.
        for ( int i = 0; i < max_generation-1; i++)
        {
            if ((o >= generation_allocation_start (generation_of (i))))
                return i;
        }
        return max_generation-1;
    }
    else
    {
        return max_generation;
    }
}

int gc_heap::object_gennum_plan (uint8_t* o)
{
    if (in_range_for_segment (o, ephemeral_heap_segment))
    {
        for (int i = 0; i <= max_generation-1; i++)
        {
            uint8_t* plan_start = generation_plan_allocation_start (generation_of (i));
            if (plan_start && (o >= plan_start))
            {
                return i;
            }
        }
    }
    return max_generation;
}

#if defined(_MSC_VER) && defined(_TARGET_X86_)
#pragma optimize("", on)        // Go back to command line default optimizations
#endif //_MSC_VER && _TARGET_X86_

heap_segment* gc_heap::make_heap_segment (uint8_t* new_pages, size_t size, int h_number)
{
    size_t initial_commit = SEGMENT_INITIAL_COMMIT;

    //Commit the first page
    if (!virtual_commit (new_pages, initial_commit, h_number))
    {
        return 0;
    }

    //overlay the heap_segment
    heap_segment* new_segment = (heap_segment*)new_pages;

    uint8_t* start = new_pages + segment_info_size;
    heap_segment_mem (new_segment) = start;
    heap_segment_used (new_segment) = start;
    heap_segment_reserved (new_segment) = new_pages + size;
    heap_segment_committed (new_segment) = (use_large_pages_p ? heap_segment_reserved(new_segment) : (new_pages + initial_commit));
    init_heap_segment (new_segment);
    dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
    return new_segment;
}

void gc_heap::init_heap_segment (heap_segment* seg)
{
    seg->flags = 0;
    heap_segment_next (seg) = 0;
    heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
    heap_segment_allocated (seg) = heap_segment_mem (seg);
#ifdef BACKGROUND_GC
    heap_segment_background_allocated (seg) = 0;
    heap_segment_saved_bg_allocated (seg) = 0;
#endif //BACKGROUND_GC
}

//Releases the segment to the OS.
// This is always called on one thread only, so calling seg_table->remove is fine.
void gc_heap::delete_heap_segment (heap_segment* seg, BOOL consider_hoarding)
{
    if (!heap_segment_loh_p (seg))
    {
        //cleanup the brick table back to the empty value
        clear_brick_table (heap_segment_mem (seg), heap_segment_reserved (seg));
    }

    if (consider_hoarding)
    {
        assert ((heap_segment_mem (seg) - (uint8_t*)seg) <= ptrdiff_t(2*OS_PAGE_SIZE));
        size_t ss = (size_t) (heap_segment_reserved (seg) - (uint8_t*)seg);
        //Don't keep the big ones.
        if (ss <= INITIAL_ALLOC)
        {
            dprintf (2, ("Hoarding segment %Ix", (size_t)seg));
#ifdef BACKGROUND_GC
            // We don't need to clear the decommitted flag because when this segment is used
            // for a new segment the flags will be cleared.
            if (!heap_segment_decommitted_p (seg))
#endif //BACKGROUND_GC
            {
                decommit_heap_segment (seg);
            }

#ifdef SEG_MAPPING_TABLE
            seg_mapping_table_remove_segment (seg);
#endif //SEG_MAPPING_TABLE

            heap_segment_next (seg) = segment_standby_list;
            segment_standby_list = seg;
            seg = 0;
        }
    }

    if (seg != 0)
    {
        dprintf (2, ("h%d: del seg: [%Ix, %Ix[",
                     heap_number, (size_t)seg,
                     (size_t)(heap_segment_reserved (seg))));

#ifdef BACKGROUND_GC
        ::record_changed_seg ((uint8_t*)seg, heap_segment_reserved (seg),
                            settings.gc_index, current_bgc_state,
                            seg_deleted);
        decommit_mark_array_by_seg (seg);
#endif //BACKGROUND_GC

#ifdef SEG_MAPPING_TABLE
        seg_mapping_table_remove_segment (seg);
#else //SEG_MAPPING_TABLE
        seg_table->remove ((uint8_t*)seg);
#endif //SEG_MAPPING_TABLE

        release_segment (seg);
    }
}

//resets the pages beyond the allocated size so they won't be swapped out and back in

void gc_heap::reset_heap_segment_pages (heap_segment* seg)
{
    size_t page_start = align_on_page ((size_t)heap_segment_allocated (seg));
    size_t size = (size_t)heap_segment_committed (seg) - page_start;
    if (size != 0)
        GCToOSInterface::VirtualReset((void*)page_start, size, false /* unlock */);
}

void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
                                           size_t extra_space)
{
    if (use_large_pages_p)
        return;
    uint8_t*  page_start = align_on_page (heap_segment_allocated(seg));
    size_t size = heap_segment_committed (seg) - page_start;
    extra_space = align_on_page (extra_space);
    if (size >= max ((extra_space + 2*OS_PAGE_SIZE), 100*OS_PAGE_SIZE))
    {
        page_start += max(extra_space, 32*OS_PAGE_SIZE);
        size -= max (extra_space, 32*OS_PAGE_SIZE);

        virtual_decommit (page_start, size, heap_number);
        dprintf (3, ("Decommitting heap segment [%Ix, %Ix[(%d)",
            (size_t)page_start,
            (size_t)(page_start + size),
            size));
        heap_segment_committed (seg) = page_start;
        if (heap_segment_used (seg) > heap_segment_committed (seg))
        {
            heap_segment_used (seg) = heap_segment_committed (seg);
        }
    }
}
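
// Worked example for the thresholds above (illustrative, assuming 4KB OS pages
// and extra_space == 0): decommitting only happens when at least
// max (0 + 2*OS_PAGE_SIZE, 100*OS_PAGE_SIZE) == 400KB sits between allocated and
// committed, and even then the first max (0, 32*OS_PAGE_SIZE) == 128KB of that
// range stays committed to absorb new allocations cheaply.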

//decommit all pages except the first one or two
void gc_heap::decommit_heap_segment (heap_segment* seg)
{
    uint8_t*  page_start = align_on_page (heap_segment_mem (seg));

    dprintf (3, ("Decommitting heap segment %Ix", (size_t)seg));

#ifdef BACKGROUND_GC
    page_start += OS_PAGE_SIZE;
#endif //BACKGROUND_GC

    size_t size = heap_segment_committed (seg) - page_start;
    virtual_decommit (page_start, size, heap_number);

    //re-init the segment object
    heap_segment_committed (seg) = page_start;
    if (heap_segment_used (seg) > heap_segment_committed (seg))
    {
        heap_segment_used (seg) = heap_segment_committed (seg);
    }
}

void gc_heap::clear_gen0_bricks()
{
    if (!gen0_bricks_cleared)
    {
        gen0_bricks_cleared = TRUE;
        //initialize brick table for gen 0
        for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
                b < brick_of (align_on_brick
                            (heap_segment_allocated (ephemeral_heap_segment)));
                b++)
        {
            set_brick (b, -1);
        }
    }
}

#ifdef BACKGROUND_GC
void gc_heap::rearrange_small_heap_segments()
{
    heap_segment* seg = freeable_small_heap_segment;
    while (seg)
    {
        heap_segment* next_seg = heap_segment_next (seg);
        // TODO: we need to consider hoarding here.
        delete_heap_segment (seg, FALSE);
        seg = next_seg;
    }
    freeable_small_heap_segment = 0;
}
#endif //BACKGROUND_GC

void gc_heap::rearrange_large_heap_segments()
{
    dprintf (2, ("deleting empty large segments"));
    heap_segment* seg = freeable_large_heap_segment;
    while (seg)
    {
        heap_segment* next_seg = heap_segment_next (seg);
        delete_heap_segment (seg, GCConfig::GetRetainVM());
        seg = next_seg;
    }
    freeable_large_heap_segment = 0;
}

void gc_heap::rearrange_heap_segments(BOOL compacting)
{
    heap_segment* seg =
        generation_start_segment (generation_of (max_generation));

    heap_segment* prev_seg = 0;
    heap_segment* next_seg = 0;
    while (seg)
    {
        next_seg = heap_segment_next (seg);

        //link ephemeral segment when expanding
        if ((next_seg == 0) && (seg != ephemeral_heap_segment))
        {
            seg->next = ephemeral_heap_segment;
            next_seg = heap_segment_next (seg);
        }

        //re-used expanded heap segment
        if ((seg == ephemeral_heap_segment) && next_seg)
        {
            heap_segment_next (prev_seg) = next_seg;
            heap_segment_next (seg) = 0;
        }
        else
        {
            uint8_t* end_segment = (compacting ?
                                 heap_segment_plan_allocated (seg) :
                                 heap_segment_allocated (seg));
            // check if the segment was reached by allocation
            if ((end_segment == heap_segment_mem (seg))&&
                !heap_segment_read_only_p (seg))
            {
                //if not, unthread and delete
                assert (prev_seg);
                assert (seg != ephemeral_heap_segment);
                heap_segment_next (prev_seg) = next_seg;
                delete_heap_segment (seg, GCConfig::GetRetainVM());

                dprintf (2, ("Deleting heap segment %Ix", (size_t)seg));
            }
            else
            {
                if (!heap_segment_read_only_p (seg))
                {
                    if (compacting)
                    {
                        heap_segment_allocated (seg) =
                            heap_segment_plan_allocated (seg);
                    }

                    // reset the pages between allocated and committed.
                    if (seg != ephemeral_heap_segment)
                    {
                        decommit_heap_segment_pages (seg, 0);
                    }
                }
                prev_seg = seg;
            }
        }

        seg = next_seg;
    }
}


#ifdef WRITE_WATCH

uint8_t* g_addresses [array_size+2]; // to get around the bug in GetWriteWatch

#ifdef TIME_WRITE_WATCH
static unsigned int tot_cycles = 0;
#endif //TIME_WRITE_WATCH

#ifdef CARD_BUNDLE

inline void gc_heap::verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word)
{
#ifdef _DEBUG
    for (size_t x = cardw_card_bundle (first_card_word); x < cardw_card_bundle (last_card_word); x++)
    {
        if (!card_bundle_set_p (x))
        {
            assert (!"Card bundle not set");
            dprintf (3, ("Card bundle %Ix not set", x));
        }
    }
#else
    UNREFERENCED_PARAMETER(first_card_word);
    UNREFERENCED_PARAMETER(last_card_word);
#endif
}

// Verifies that any bundles that are not set represent only cards that are not set.
inline void gc_heap::verify_card_bundles()
{
#ifdef _DEBUG
    size_t lowest_card = card_word (card_of (lowest_address));
    size_t highest_card = card_word (card_of (highest_address));
    size_t cardb = cardw_card_bundle (lowest_card);
    size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (highest_card));

    while (cardb < end_cardb)
    {
        uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb), lowest_card)];
        uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1), highest_card)];

        if (card_bundle_set_p (cardb) == 0)
        {
            // Verify that no card is set
            while (card_word < card_word_end)
            {
                if (*card_word != 0)
                {
                    dprintf  (3, ("gc: %d, Card word %Ix for address %Ix set, card_bundle %Ix clear",
                            dd_collection_count (dynamic_data_of (0)),
                            (size_t)(card_word-&card_table[0]),
                            (size_t)(card_address ((size_t)(card_word-&card_table[0]) * card_word_width)), cardb));
                }

                assert((*card_word)==0);
                card_word++;
            }
        }

        cardb++;
    }
#endif
}

// If card bundles are enabled, use write watch to find pages in the card table that have
// been dirtied, and set the corresponding card bundle bits.
void gc_heap::update_card_table_bundle()
{
    if (card_bundles_enabled())
    {
        // The address of the card word containing the card representing the lowest heap address
        uint8_t* base_address = (uint8_t*)(&card_table[card_word (card_of (lowest_address))]);

        // The address of the card word containing the card representing the highest heap address
        uint8_t* high_address = (uint8_t*)(&card_table[card_word (card_of (highest_address))]);

        uint8_t* saved_base_address = base_address;
        uintptr_t bcount = array_size;
        size_t saved_region_size = align_on_page (high_address) - saved_base_address;

        do
        {
            size_t region_size = align_on_page (high_address) - base_address;

            dprintf (3,("Probing card table pages [%Ix, %Ix[", (size_t)base_address, (size_t)base_address+region_size));
            bool success = GCToOSInterface::GetWriteWatch(false /* resetState */,
                                                          base_address,
                                                          region_size,
                                                          (void**)g_addresses,
                                                          &bcount);
            assert (success && "GetWriteWatch failed!");

            dprintf (3,("Found %d pages written", bcount));
            for (unsigned i = 0; i < bcount; i++)
            {
                // Offset of the dirty page from the start of the card table (clamped to base_address)
                size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0];

                // Offset of the end of the page from the start of the card table (clamped to high addr)
                size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0];
                assert (bcardw >= card_word (card_of (g_gc_lowest_address)));

                // Set the card bundle bits representing the dirty card table page
                card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw)));
                dprintf (3,("Set Card bundle [%Ix, %Ix[", cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))));

                verify_card_bundle_bits_set(bcardw, ecardw);
            }

            if (bcount >= array_size)
            {
                base_address = g_addresses [array_size-1] + OS_PAGE_SIZE;
                bcount = array_size;
            }

        } while ((bcount >= array_size) && (base_address < high_address));

        // Now that we've updated the card bundle bits, reset the write-tracking state.
        GCToOSInterface::ResetWriteWatch (saved_base_address, saved_region_size);
    }
}
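
// Illustrative sketch of the index math above for one dirty card table page,
// ignoring the clamping to base_address/high_address; the code is commented out
// (not part of the build).
//
//   size_t bcardw = (uint32_t*)g_addresses[i] - &card_table[0];  // first card word on the page
//   size_t ecardw = bcardw + OS_PAGE_SIZE / sizeof (uint32_t);   // one past the last card word
//   // the page covers card words [bcardw, ecardw), so the bundles spanning that
//   // range are set via cardw_card_bundle / align_cardw_on_bundle.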
#endif //CARD_BUNDLE

// static
void gc_heap::reset_write_watch_for_gc_heap(void* base_address, size_t region_size)
{
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    SoftwareWriteWatch::ClearDirty(base_address, region_size);
#else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    GCToOSInterface::ResetWriteWatch(base_address, region_size);
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
}

// static
void gc_heap::get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended)
{
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    SoftwareWriteWatch::GetDirty(base_address, region_size, dirty_pages, dirty_page_count_ref, reset, is_runtime_suspended);
#else // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    UNREFERENCED_PARAMETER(is_runtime_suspended);
    bool success = GCToOSInterface::GetWriteWatch(reset, base_address, region_size, dirty_pages, dirty_page_count_ref);
    assert(success);
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
}

const size_t ww_reset_quantum = 128*1024*1024;

inline
void gc_heap::switch_one_quantum()
{
    enable_preemptive ();
    GCToOSInterface::Sleep (1);
    disable_preemptive (true);
}

void gc_heap::reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size)
{
    size_t reset_size = 0;
    size_t remaining_reset_size = 0;
    size_t next_reset_size = 0;

    while (reset_size != total_reset_size)
    {
        remaining_reset_size = total_reset_size - reset_size;
        next_reset_size = ((remaining_reset_size >= ww_reset_quantum) ? ww_reset_quantum : remaining_reset_size);
        if (next_reset_size)
        {
            reset_write_watch_for_gc_heap(start_address, next_reset_size);
            reset_size += next_reset_size;

            switch_one_quantum();
        }
    }

    assert (reset_size == total_reset_size);
}

// This does a Sleep(1) for every ww_reset_quantum bytes of write watch
// we reset concurrently.
void gc_heap::switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size)
{
    if (concurrent_p)
    {
        *current_total_reset_size += last_reset_size;

        dprintf (2, ("reset %Id bytes so far", *current_total_reset_size));

        if (*current_total_reset_size > ww_reset_quantum)
        {
            switch_one_quantum();

            *current_total_reset_size = 0;
        }
    }
}
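
// Worked example (illustrative): with ww_reset_quantum == 128MB, concurrently
// resetting write watch for roughly 300MB of segment memory yields about two
// Sleep(1) calls - one each time the running total crosses the 128MB quantum
// and is reset back to zero.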

void gc_heap::reset_write_watch (BOOL concurrent_p)
{
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    // Software write watch currently requires the runtime to be suspended during reset. See SoftwareWriteWatch::ClearDirty().
    assert(!concurrent_p);
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));

    PREFIX_ASSUME(seg != NULL);

    size_t reset_size = 0;
    size_t region_size = 0;

    dprintf (2, ("bgc lowest: %Ix, bgc highest: %Ix", background_saved_lowest_address, background_saved_highest_address));

    while (seg)
    {
        uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
        base_address = max (base_address, background_saved_lowest_address);

        uint8_t* high_address = 0;
        high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
        high_address = min (high_address, background_saved_highest_address);

        if (base_address < high_address)
        {
            region_size = high_address - base_address;

#ifdef TIME_WRITE_WATCH
            unsigned int time_start = GetCycleCount32();
#endif //TIME_WRITE_WATCH
            dprintf (3, ("h%d: soh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
            //reset_ww_by_chunk (base_address, region_size);
            reset_write_watch_for_gc_heap(base_address, region_size);

#ifdef TIME_WRITE_WATCH
            unsigned int time_stop = GetCycleCount32();
            tot_cycles += time_stop - time_start;
            printf ("ResetWriteWatch Duration: %d, total: %d\n",
                    time_stop - time_start, tot_cycles);
#endif //TIME_WRITE_WATCH

            switch_on_reset (concurrent_p, &reset_size, region_size);
        }

        seg = heap_segment_next_rw (seg);

        concurrent_print_time_delta ("CRWW soh");
    }

    //concurrent_print_time_delta ("CRW soh");

    seg = heap_segment_rw (generation_start_segment (large_object_generation));

    PREFIX_ASSUME(seg != NULL);

    while (seg)
    {
        uint8_t* base_address = align_lower_page (heap_segment_mem (seg));
        uint8_t* high_address =  heap_segment_allocated (seg);

        base_address = max (base_address, background_saved_lowest_address);
        high_address = min (high_address, background_saved_highest_address);

        if (base_address < high_address)
        {
            region_size = high_address - base_address;

#ifdef TIME_WRITE_WATCH
            unsigned int time_start = GetCycleCount32();
#endif //TIME_WRITE_WATCH
            dprintf (3, ("h%d: loh ww: [%Ix(%Id)", heap_number, (size_t)base_address, region_size));
            //reset_ww_by_chunk (base_address, region_size);
            reset_write_watch_for_gc_heap(base_address, region_size);

#ifdef TIME_WRITE_WATCH
            unsigned int time_stop = GetCycleCount32();
            tot_cycles += time_stop - time_start;
            printf ("ResetWriteWatch Duration: %d, total: %d\n",
                    time_stop - time_start, tot_cycles);
#endif //TIME_WRITE_WATCH

            switch_on_reset (concurrent_p, &reset_size, region_size);
        }

        seg = heap_segment_next_rw (seg);

        concurrent_print_time_delta ("CRWW loh");
    }

#ifdef DEBUG_WRITE_WATCH
    debug_write_watch = (uint8_t**)~0;
#endif //DEBUG_WRITE_WATCH
}

#endif //WRITE_WATCH

#ifdef BACKGROUND_GC
void gc_heap::restart_vm()
{
    //assert (generation_allocation_pointer (youngest_generation) == 0);
    dprintf (3, ("Restarting EE"));
    STRESS_LOG0(LF_GC, LL_INFO10000, "Concurrent GC: Restarting EE\n");
    ee_proceed_event.Set();
}

inline
void fire_alloc_wait_event (alloc_wait_reason awr, BOOL begin_p)
{
    if (awr != awr_ignored)
    {
        if (begin_p)
        {
            FIRE_EVENT(BGCAllocWaitBegin, awr);
        }
        else
        {
            FIRE_EVENT(BGCAllocWaitEnd, awr);
        }
    }
}


void gc_heap::fire_alloc_wait_event_begin (alloc_wait_reason awr)
{
    fire_alloc_wait_event (awr, TRUE);
}


void gc_heap::fire_alloc_wait_event_end (alloc_wait_reason awr)
{
    fire_alloc_wait_event (awr, FALSE);
}
#endif //BACKGROUND_GC
void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* start, uint8_t* pointer)
{
    gen.allocation_start = start;
    gen.allocation_context.alloc_ptr = pointer;
    gen.allocation_context.alloc_limit = pointer;
    gen.allocation_context.alloc_bytes = 0;
    gen.allocation_context.alloc_bytes_loh = 0;
    gen.allocation_context_start_region = pointer;
    gen.start_segment = seg;
    gen.allocation_segment = seg;
    gen.plan_allocation_start = 0;
    gen.free_list_space = 0;
    gen.pinned_allocated = 0;
    gen.free_list_allocated = 0;
    gen.end_seg_allocated = 0;
    gen.condemned_allocated = 0;
    gen.sweep_allocated = 0;
    gen.free_obj_space = 0;
    gen.allocation_size = 0;
    gen.pinned_allocation_sweep_size = 0;
    gen.pinned_allocation_compact_size = 0;
    gen.allocate_end_seg_p = FALSE;
    gen.free_list_allocator.clear();

#ifdef FREE_USAGE_STATS
    memset (gen.gen_free_spaces, 0, sizeof (gen.gen_free_spaces));
    memset (gen.gen_current_pinned_free_spaces, 0, sizeof (gen.gen_current_pinned_free_spaces));
    memset (gen.gen_plugs, 0, sizeof (gen.gen_plugs));
#endif //FREE_USAGE_STATS
}

void gc_heap::adjust_ephemeral_limits ()
{
    ephemeral_low = generation_allocation_start (generation_of (max_generation - 1));
    ephemeral_high = heap_segment_reserved (ephemeral_heap_segment);

    dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix",
        (size_t)ephemeral_low, (size_t)ephemeral_high))

#ifndef MULTIPLE_HEAPS
    // This updates the write barrier helpers with the new info.
    stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high);
#endif // MULTIPLE_HEAPS
}

#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
FILE* CreateLogFile(const GCConfigStringHolder& temp_logfile_name, bool is_config)
{
    FILE* logFile;

    if (!temp_logfile_name.Get())
    {
        return nullptr;
    }

    char logfile_name[MAX_LONGPATH+1];
    uint32_t pid = GCToOSInterface::GetCurrentProcessId();
    const char* suffix = is_config ? ".config.log" : ".log";
    _snprintf_s(logfile_name, MAX_LONGPATH+1, _TRUNCATE, "%s.%d%s", temp_logfile_name.Get(), pid, suffix);
    logFile = fopen(logfile_name, "wb");
    return logFile;
}
#endif //TRACE_GC || GC_CONFIG_DRIVEN

size_t gc_heap::get_segment_size_hard_limit (uint32_t* num_heaps, bool should_adjust_num_heaps)
{
    assert (heap_hard_limit);
    size_t aligned_hard_limit =  align_on_segment_hard_limit (heap_hard_limit);
    if (should_adjust_num_heaps)
    {
        uint32_t max_num_heaps = (uint32_t)(aligned_hard_limit / min_segment_size_hard_limit);
        if (*num_heaps > max_num_heaps)
        {
            *num_heaps = max_num_heaps;
        }
    }

    size_t seg_size = aligned_hard_limit / *num_heaps;
    size_t aligned_seg_size = (use_large_pages_p ? align_on_segment_hard_limit (seg_size) : round_up_power2 (seg_size));

    assert (g_theGCHeap->IsValidSegmentSize (aligned_seg_size));

    size_t seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
    if (seg_size_from_config)
    {
        size_t aligned_seg_size_config = (use_large_pages_p ? align_on_segment_hard_limit (seg_size_from_config) : round_up_power2 (seg_size_from_config));

        aligned_seg_size = max (aligned_seg_size, aligned_seg_size_config);
    }

    //printf ("limit: %Idmb, aligned: %Idmb, %d heaps, seg size from config: %Idmb, seg size %Idmb",
    //    (heap_hard_limit / 1024 / 1024),
    //    (aligned_hard_limit / 1024 / 1024),
    //    *num_heaps,
    //    (seg_size_from_config / 1024 / 1024),
    //    (aligned_seg_size / 1024 / 1024));
    return aligned_seg_size;
}
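
// Worked example (illustrative, without large pages or a segment size config
// override): with an aligned hard limit of 256MB and *num_heaps == 8, seg_size
// is 32MB and round_up_power2 keeps it at 32MB; with 6 heaps seg_size would be
// ~42.7MB and round_up_power2 raises the segment size to 64MB.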

HRESULT gc_heap::initialize_gc (size_t segment_size,
                                size_t heap_size
#ifdef MULTIPLE_HEAPS
                                ,unsigned number_of_heaps
#endif //MULTIPLE_HEAPS
)
{
#ifdef TRACE_GC
    if (GCConfig::GetLogEnabled())
    {
        gc_log = CreateLogFile(GCConfig::GetLogFile(), false);

        if (gc_log == NULL)
            return E_FAIL;

        // GCLogFileSize in MBs.
        gc_log_file_size = static_cast<size_t>(GCConfig::GetLogFileSize());

        if (gc_log_file_size <= 0 || gc_log_file_size > 500)
        {
            fclose (gc_log);
            return E_FAIL;
        }

        gc_log_lock.Initialize();
        gc_log_buffer = new (nothrow) uint8_t [gc_log_buffer_size];
        if (!gc_log_buffer)
        {
            fclose(gc_log);
            return E_FAIL;
        }

        memset (gc_log_buffer, '*', gc_log_buffer_size);

        max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size;
    }
#endif // TRACE_GC

#ifdef GC_CONFIG_DRIVEN
    if (GCConfig::GetConfigLogEnabled())
    {
        gc_config_log = CreateLogFile(GCConfig::GetConfigLogFile(), true);

        if (gc_config_log == NULL)
            return E_FAIL;

        gc_config_log_buffer = new (nothrow) uint8_t [gc_config_log_buffer_size];
        if (!gc_config_log_buffer)
        {
            fclose(gc_config_log);
            return E_FAIL;
        }

        compact_ratio = static_cast<int>(GCConfig::GetCompactRatio());

        //         h#  | GC  | gen | C   | EX   | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
        cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
                "h#", // heap index
                "GC", // GC index
                "g", // generation
                "C",  // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not
                "EX", // heap expansion
                "NF", // normal fit
                "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg.
                "ML", // mark list
                "DM", // demotion
                "PreS", // short object before pinned plug
                "PostS", // short object after pinned plug
                "Merge", // merged pinned plugs
                "Conv", // converted to pinned plug
                "Pre", // plug before pinned plug but not after
                "Post", // plug after pinned plug but not before
                "PrPo", // plug both before and after pinned plug
                "PreP", // pre short object padded
                "PostP" // post short object padded
                ));
    }
#endif //GC_CONFIG_DRIVEN

#ifdef GC_STATS
    GCConfigStringHolder logFileName = GCConfig::GetMixLogFile();
    if (logFileName.Get() != nullptr)
    {
        GCStatistics::logFileName = _strdup(logFileName.Get());
        GCStatistics::logFile = fopen(GCStatistics::logFileName, "a");
        if (!GCStatistics::logFile)
        {
            return E_FAIL;
        }
    }
#endif // GC_STATS

    HRESULT hres = S_OK;

#ifdef WRITE_WATCH
    hardware_write_watch_api_supported();
#ifdef BACKGROUND_GC
    if (can_use_write_watch_for_gc_heap() && GCConfig::GetConcurrentGC())
    {
        gc_can_use_concurrent = true;
#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        virtual_alloc_hardware_write_watch = true;
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    }
    else
    {
        gc_can_use_concurrent = false;
    }
#endif //BACKGROUND_GC
#endif //WRITE_WATCH

#ifdef BACKGROUND_GC
    // leave the first page to contain only segment info
    // because otherwise we might need to revisit the first page frequently in
    // background GC.
    segment_info_size = OS_PAGE_SIZE;
#else
    segment_info_size = Align (sizeof (heap_segment), get_alignment_constant (FALSE));
#endif //BACKGROUND_GC

    reserved_memory = 0;
#ifdef MULTIPLE_HEAPS
    reserved_memory_limit = (segment_size + heap_size) * number_of_heaps;
#else //MULTIPLE_HEAPS
    reserved_memory_limit = segment_size + heap_size;
    int number_of_heaps = 1;
#endif //MULTIPLE_HEAPS

    if (heap_hard_limit)
    {
        check_commit_cs.Initialize();
    }

    if (!reserve_initial_memory (segment_size,heap_size,number_of_heaps,use_large_pages_p))
        return E_OUTOFMEMORY;

#ifdef CARD_BUNDLE
    //check if we need to turn on card_bundles.
#ifdef MULTIPLE_HEAPS
    // use INT64 arithmetic here because of possible overflow on 32-bit platforms
    uint64_t th = (uint64_t)MH_TH_CARD_BUNDLE*number_of_heaps;
#else
    // use INT64 arithmetic here because of possible overflow on 32-bit platforms
    uint64_t th = (uint64_t)SH_TH_CARD_BUNDLE;
#endif //MULTIPLE_HEAPS

    if (can_use_write_watch_for_card_table() && reserved_memory >= th)
    {
        settings.card_bundles = TRUE;
    }
    else
    {
        settings.card_bundles = FALSE;
    }
#endif //CARD_BUNDLE

    settings.first_init();

    int latency_level_from_config = static_cast<int>(GCConfig::GetLatencyLevel());
    if (latency_level_from_config >= latency_level_first && latency_level_from_config <= latency_level_last)
    {
        gc_heap::latency_level = static_cast<gc_latency_level>(latency_level_from_config);
    }

    init_static_data();

    g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address);

    if (!g_gc_card_table)
        return E_OUTOFMEMORY;

    gc_started = FALSE;

#ifdef MULTIPLE_HEAPS
    g_heaps = new (nothrow) gc_heap* [number_of_heaps];
    if (!g_heaps)
        return E_OUTOFMEMORY;

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
#endif // _PREFAST_
    g_promoted = new (nothrow) size_t [number_of_heaps*16];
    g_bpromoted = new (nothrow) size_t [number_of_heaps*16];
#ifdef MH_SC_MARK
    g_mark_stack_busy = new (nothrow) int[(number_of_heaps+2)*HS_CACHE_LINE_SIZE/sizeof(int)];
#endif //MH_SC_MARK
#ifdef _PREFAST_
#pragma warning(pop)
#endif // _PREFAST_
    if (!g_promoted || !g_bpromoted)
        return E_OUTOFMEMORY;

#ifdef MH_SC_MARK
    if (!g_mark_stack_busy)
        return E_OUTOFMEMORY;
#endif //MH_SC_MARK

    if (!create_thread_support (number_of_heaps))
        return E_OUTOFMEMORY;

    if (!heap_select::init (number_of_heaps))
        return E_OUTOFMEMORY;

#endif //MULTIPLE_HEAPS

#ifdef MULTIPLE_HEAPS
    yp_spin_count_unit = 32 * number_of_heaps;
#else
    yp_spin_count_unit = 32 * g_num_processors;
#endif //MULTIPLE_HEAPS

#if defined(__linux__)
    GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
                                         static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)),
                                         static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)),
                                         static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private)));
#endif // __linux__

    if (!init_semi_shared())
    {
        hres = E_FAIL;
    }

    return hres;
}

//Initializes PER_HEAP_ISOLATED data members.
int
gc_heap::init_semi_shared()
{
    int ret = 0;

#ifdef BGC_SERVO_TUNING
    uint32_t current_memory_load = 0;
    uint32_t sweep_flr_goal = 0;
    uint32_t sweep_flr_goal_loh = 0;
#endif //BGC_SERVO_TUNING

    // This is used for heap expansion - it reserves exactly the space needed for
    // the generation start objects of gen 0 through (max_generation-1). When we
    // expand the heap we allocate all these gen starts at the beginning of the
    // new ephemeral seg.
    eph_gen_starts_size = (Align (min_obj_size)) * max_generation;

#ifdef MARK_LIST
#ifdef MULTIPLE_HEAPS
    mark_list_size = min (150*1024, max (8192, soh_segment_size/(2*10*32)));
    g_mark_list = make_mark_list (mark_list_size*n_heaps);

    min_balance_threshold = alloc_quantum_balance_units * CLR_SIZE * 2;
#ifdef PARALLEL_MARK_LIST_SORT
    g_mark_list_copy = make_mark_list (mark_list_size*n_heaps);
    if (!g_mark_list_copy)
    {
        goto cleanup;
    }
#endif //PARALLEL_MARK_LIST_SORT

#else //MULTIPLE_HEAPS

    mark_list_size = max (8192, soh_segment_size/(64*32));
    g_mark_list = make_mark_list (mark_list_size);

#endif //MULTIPLE_HEAPS

    dprintf (3, ("mark_list_size: %d", mark_list_size));

    if (!g_mark_list)
    {
        goto cleanup;
    }
#endif //MARK_LIST

#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
    if (!seg_mapping_table_init())
        goto cleanup;
#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE

#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
    seg_table = sorted_table::make_sorted_table();

    if (!seg_table)
        goto cleanup;
#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE

    segment_standby_list = 0;

    if (!full_gc_approach_event.CreateManualEventNoThrow(FALSE))
    {
        goto cleanup;
    }
    if (!full_gc_end_event.CreateManualEventNoThrow(FALSE))
    {
        goto cleanup;
    }

    fgn_loh_percent = 0;
    full_gc_approach_event_set = false;

    memset (full_gc_counts, 0, sizeof (full_gc_counts));

    last_gc_index = 0;
    should_expand_in_full_gc = FALSE;

#ifdef FEATURE_LOH_COMPACTION
    loh_compaction_always_p = GCConfig::GetLOHCompactionMode() != 0;
    loh_compaction_mode = loh_compaction_default;
#endif //FEATURE_LOH_COMPACTION

    loh_size_threshold = (size_t)GCConfig::GetLOHThreshold();
    assert (loh_size_threshold >= LARGE_OBJECT_SIZE);

#ifdef BGC_SERVO_TUNING
    memset (bgc_tuning::gen_calc, 0, sizeof (bgc_tuning::gen_calc));
    memset (bgc_tuning::gen_stats, 0, sizeof (bgc_tuning::gen_stats));
    memset (bgc_tuning::current_bgc_end_data, 0, sizeof (bgc_tuning::current_bgc_end_data));

    // for the outer loop - the ML (memory load) loop
    bgc_tuning::enable_fl_tuning = (GCConfig::GetBGCFLTuningEnabled() != 0);
    bgc_tuning::memory_load_goal = (uint32_t)GCConfig::GetBGCMemGoal();
    bgc_tuning::memory_load_goal_slack = (uint32_t)GCConfig::GetBGCMemGoalSlack();
    bgc_tuning::ml_kp = (double)GCConfig::GetBGCMLkp() / 1000.0;
    bgc_tuning::ml_ki = (double)GCConfig::GetBGCMLki() / 1000.0;
    bgc_tuning::ratio_correction_step = (double)GCConfig::GetBGCG2RatioStep() / 100.0;

    // for the inner loop - the alloc loop which calculates the allocated bytes in gen2 before
    // triggering the next BGC.
    bgc_tuning::above_goal_kp = (double)GCConfig::GetBGCFLkp() / 1000000.0;
    bgc_tuning::enable_ki = (GCConfig::GetBGCFLEnableKi() != 0);
    bgc_tuning::above_goal_ki = (double)GCConfig::GetBGCFLki() / 1000000.0;
    bgc_tuning::enable_kd = (GCConfig::GetBGCFLEnableKd() != 0);
    bgc_tuning::above_goal_kd = (double)GCConfig::GetBGCFLkd() / 100.0;
    bgc_tuning::enable_smooth = (GCConfig::GetBGCFLEnableSmooth() != 0);
    bgc_tuning::num_gen1s_smooth_factor = (double)GCConfig::GetBGCFLSmoothFactor() / 100.0;
    bgc_tuning::enable_tbh = (GCConfig::GetBGCFLEnableTBH() != 0);
    bgc_tuning::enable_ff = (GCConfig::GetBGCFLEnableFF() != 0);
    bgc_tuning::above_goal_ff = (double)GCConfig::GetBGCFLff() / 100.0;
    bgc_tuning::enable_gradual_d = (GCConfig::GetBGCFLGradualD() != 0);
    sweep_flr_goal = (uint32_t)GCConfig::GetBGCFLSweepGoal();
    sweep_flr_goal_loh = (uint32_t)GCConfig::GetBGCFLSweepGoalLOH();

    bgc_tuning::gen_calc[0].sweep_flr_goal = ((sweep_flr_goal == 0) ? 20.0 : (double)sweep_flr_goal);
    bgc_tuning::gen_calc[1].sweep_flr_goal = ((sweep_flr_goal_loh == 0) ? 20.0 : (double)sweep_flr_goal_loh);

    bgc_tuning::available_memory_goal = (uint64_t)((double)gc_heap::total_physical_mem * (double)(100 - bgc_tuning::memory_load_goal) / 100);
    get_memory_info (&current_memory_load);

    dprintf (BGC_TUNING_LOG, ("BTL tuning %s!!!",
        (bgc_tuning::enable_fl_tuning ? "enabled" : "disabled")));

#ifdef SIMPLE_DPRINTF
    dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: mem goal: %d%%(%I64d), +/-%d%%, gen2 correction factor: %.2f, sweep flr goal: %d%%, smooth factor: %.3f(%s), TBH: %s, FF: %.3f(%s), ml: kp %.5f, ki %.10f",
        bgc_tuning::memory_load_goal,
        bgc_tuning::available_memory_goal,
        bgc_tuning::memory_load_goal_slack,
        bgc_tuning::ratio_correction_step,
        (int)bgc_tuning::gen_calc[0].sweep_flr_goal,
        bgc_tuning::num_gen1s_smooth_factor,
        (bgc_tuning::enable_smooth ? "enabled" : "disabled"),
        (bgc_tuning::enable_tbh ? "enabled" : "disabled"),
        bgc_tuning::above_goal_ff,
        (bgc_tuning::enable_ff ? "enabled" : "disabled"),
        bgc_tuning::ml_kp,
        bgc_tuning::ml_ki));

    dprintf (BGC_TUNING_LOG, ("BTL tuning parameters: kp: %.5f, ki: %.5f (%s), kd: %.3f (kd-%s, gd-%s), ff: %.3f",
        bgc_tuning::above_goal_kp,
        bgc_tuning::above_goal_ki,
        (bgc_tuning::enable_ki ? "enabled" : "disabled"),
        bgc_tuning::above_goal_kd,
        (bgc_tuning::enable_kd ? "enabled" : "disabled"),
        (bgc_tuning::enable_gradual_d ? "enabled" : "disabled"),
        bgc_tuning::above_goal_ff));
#endif //SIMPLE_DPRINTF

    if (bgc_tuning::enable_fl_tuning && (current_memory_load < bgc_tuning::memory_load_goal))
    {
        uint32_t distance_to_goal = bgc_tuning::memory_load_goal - current_memory_load;
        bgc_tuning::stepping_interval = max (distance_to_goal / 10, 1);
        bgc_tuning::last_stepping_mem_load = current_memory_load;
        bgc_tuning::last_stepping_bgc_count = 0;
        dprintf (BGC_TUNING_LOG, ("current ml: %d, %d to goal, interval: %d",
            current_memory_load, distance_to_goal, bgc_tuning::stepping_interval));
    }
    else
    {
        dprintf (BGC_TUNING_LOG, ("current ml: %d, >= goal: %d, disable stepping",
            current_memory_load, bgc_tuning::memory_load_goal));
        bgc_tuning::use_stepping_trigger_p = false;
    }
#endif //BGC_SERVO_TUNING

#ifdef BACKGROUND_GC
    memset (ephemeral_fgc_counts, 0, sizeof (ephemeral_fgc_counts));
    bgc_alloc_spin_count = static_cast<uint32_t>(GCConfig::GetBGCSpinCount());
    bgc_alloc_spin = static_cast<uint32_t>(GCConfig::GetBGCSpin());

    {
        int number_bgc_threads = 1;
#ifdef MULTIPLE_HEAPS
        number_bgc_threads = n_heaps;
#endif //MULTIPLE_HEAPS
        if (!create_bgc_threads_support (number_bgc_threads))
        {
            goto cleanup;
        }
    }
#endif //BACKGROUND_GC

    memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));

#ifdef GC_CONFIG_DRIVEN
    compact_or_sweep_gcs[0] = 0;
    compact_or_sweep_gcs[1] = 0;
#endif //GC_CONFIG_DRIVEN

#ifdef SHORT_PLUGS
    short_plugs_pad_ratio = (double)DESIRED_PLUG_LENGTH / (double)(DESIRED_PLUG_LENGTH - Align (min_obj_size));
#endif //SHORT_PLUGS

    ret = 1;

cleanup:

    if (!ret)
    {
        if (full_gc_approach_event.IsValid())
        {
            full_gc_approach_event.CloseEvent();
        }
        if (full_gc_end_event.IsValid())
        {
            full_gc_end_event.CloseEvent();
        }
    }

    return ret;
}

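// Creates and initializes a gc_heap instance. With MULTIPLE_HEAPS a real object is
// allocated, tied to its owning GCHeap and given its parallel mark list pieces; in the
// workstation build the heap state is static, so a non-null sentinel is returned on success.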
gc_heap* gc_heap::make_gc_heap (
#ifdef MULTIPLE_HEAPS
                                GCHeap* vm_hp,
                                int heap_number
#endif //MULTIPLE_HEAPS
                                )
{
    gc_heap* res = 0;

#ifdef MULTIPLE_HEAPS
    res = new (nothrow) gc_heap;
    if (!res)
        return 0;

    res->vm_heap = vm_hp;
    res->alloc_context_count = 0;

#ifdef MARK_LIST
#ifdef PARALLEL_MARK_LIST_SORT
    res->mark_list_piece_start = new (nothrow) uint8_t**[n_heaps];
    if (!res->mark_list_piece_start)
        return 0;

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:22011) // Suppress PREFast warning about integer underflow/overflow
#endif // _PREFAST_
    res->mark_list_piece_end = new (nothrow) uint8_t**[n_heaps + 32]; // +32 is padding to reduce false sharing
#ifdef _PREFAST_
#pragma warning(pop)
#endif // _PREFAST_

    if (!res->mark_list_piece_end)
        return 0;
#endif //PARALLEL_MARK_LIST_SORT
#endif //MARK_LIST


#endif //MULTIPLE_HEAPS

    if (res->init_gc_heap (
#ifdef MULTIPLE_HEAPS
        heap_number
#else  //MULTIPLE_HEAPS
        0
#endif //MULTIPLE_HEAPS
        )==0)
    {
        return 0;
    }

#ifdef MULTIPLE_HEAPS
    return res;
#else
    return (gc_heap*)1;
#endif //MULTIPLE_HEAPS
}

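// Blocks until the in-progress GC (if any) finishes, waiting on a heap's gc_done_event
// (with MULTIPLE_HEAPS, the event of the heap selected for the current thread). The
// caller runs in preemptive mode for the duration of the wait.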
uint32_t
gc_heap::wait_for_gc_done(int32_t timeOut)
{
    bool cooperative_mode = enable_preemptive ();

    uint32_t dwWaitResult = NOERROR;

    gc_heap* wait_heap = NULL;
    while (gc_heap::gc_started)
    {
#ifdef MULTIPLE_HEAPS
        wait_heap = GCHeap::GetHeap(heap_select::select_heap(NULL))->pGenGCHeap;
        dprintf(2, ("waiting for the gc_done_event on heap %d", wait_heap->heap_number));
#endif // MULTIPLE_HEAPS

#ifdef _PREFAST_
        PREFIX_ASSUME(wait_heap != NULL);
#endif // _PREFAST_

        dwWaitResult = wait_heap->gc_done_event.Wait(timeOut, FALSE);
    }
    disable_preemptive (cooperative_mode);

    return dwWaitResult;
}

void
gc_heap::set_gc_done()
{
    enter_gc_done_event_lock();
    if (!gc_done_event_set)
    {
        gc_done_event_set = true;
        dprintf (2, ("heap %d: setting gc_done_event", heap_number));
        gc_done_event.Set();
    }
    exit_gc_done_event_lock();
}

void
gc_heap::reset_gc_done()
{
    enter_gc_done_event_lock();
    if (gc_done_event_set)
    {
        gc_done_event_set = false;
        dprintf (2, ("heap %d: resetting gc_done_event", heap_number));
        gc_done_event.Reset();
    }
    exit_gc_done_event_lock();
}

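// gc_done_event_lock is a simple spin lock: -1 means free, >= 0 means held.
// Acquire it by CAS'ing -1 -> 0; on contention spin with YieldProcessor for
// yp_spin_count_unit iterations (on MP machines) and then yield the thread.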
void
gc_heap::enter_gc_done_event_lock()
{
    uint32_t dwSwitchCount = 0;
retry:

    if (Interlocked::CompareExchange(&gc_done_event_lock, 0, -1) >= 0)
    {
        while (gc_done_event_lock >= 0)
        {
            if  (g_num_processors > 1)
            {
                int spin_count = yp_spin_count_unit;
                for (int j = 0; j < spin_count; j++)
                {
                    if  (gc_done_event_lock < 0)
                        break;
                    YieldProcessor();           // indicate to the processor that we are spinning
                }
                if  (gc_done_event_lock >= 0)
                    GCToOSInterface::YieldThread(++dwSwitchCount);
            }
            else
                GCToOSInterface::YieldThread(++dwSwitchCount);
        }
        goto retry;
    }
}

void
gc_heap::exit_gc_done_event_lock()
{
    gc_done_event_lock = -1;
}

#ifndef MULTIPLE_HEAPS

#ifdef RECORD_LOH_STATE
int gc_heap::loh_state_index = 0;
gc_heap::loh_state_info gc_heap::last_loh_states[max_saved_loh_states];
#endif //RECORD_LOH_STATE

VOLATILE(int32_t) gc_heap::gc_done_event_lock;
VOLATILE(bool) gc_heap::gc_done_event_set;
GCEvent gc_heap::gc_done_event;
#endif //!MULTIPLE_HEAPS
VOLATILE(bool) gc_heap::internal_gc_done;

void gc_heap::add_saved_spinlock_info (
            bool loh_p,
            msl_enter_state enter_state,
            msl_take_state take_state)

{
#ifdef SPINLOCK_HISTORY
    spinlock_info* current = &last_spinlock_info[spinlock_info_index];

    current->enter_state = enter_state;
    current->take_state = take_state;
    current->thread_id.SetToCurrentThread();
    current->loh_p = loh_p;
    dprintf (SPINLOCK_LOG, ("[%d]%s %s %s",
        heap_number,
        (loh_p ? "loh" : "soh"),
        ((enter_state == me_acquire) ? "E" : "L"),
        msl_take_state_str[take_state]));

    spinlock_info_index++;

    assert (spinlock_info_index <= max_saved_spinlock_info);

    if (spinlock_info_index >= max_saved_spinlock_info)
    {
        spinlock_info_index = 0;
    }
#else
    MAYBE_UNUSED_VAR(enter_state);
    MAYBE_UNUSED_VAR(take_state);
#endif //SPINLOCK_HISTORY
}

int
gc_heap::init_gc_heap (int  h_number)
{
#ifdef MULTIPLE_HEAPS

    time_bgc_last = 0;

    allocated_since_last_gc = 0;

#ifdef SPINLOCK_HISTORY
    spinlock_info_index = 0;
    memset (last_spinlock_info, 0, sizeof(last_spinlock_info));
#endif //SPINLOCK_HISTORY

    // initialize per heap members.
    ephemeral_low = (uint8_t*)1;

    ephemeral_high = MAX_PTR;

    ephemeral_heap_segment = 0;

    oomhist_index_per_heap = 0;

    freeable_large_heap_segment = 0;

    condemned_generation_num = 0;

    blocking_collection = FALSE;

    generation_skip_ratio = 100;

    mark_stack_tos = 0;

    mark_stack_bos = 0;

    mark_stack_array_length = 0;

    mark_stack_array = 0;

#if defined (_DEBUG) && defined (VERIFY_HEAP)
    verify_pinned_queue_p = FALSE;
#endif // _DEBUG && VERIFY_HEAP

    loh_pinned_queue_tos = 0;

    loh_pinned_queue_bos = 0;

    loh_pinned_queue_length = 0;

    loh_pinned_queue_decay = LOH_PIN_DECAY;

    loh_pinned_queue = 0;

    min_overflow_address = MAX_PTR;

    max_overflow_address = 0;

    gen0_bricks_cleared = FALSE;

    gen0_must_clear_bricks = 0;

    allocation_quantum = CLR_SIZE;

    more_space_lock_soh = gc_lock;

    more_space_lock_loh = gc_lock;

    ro_segments_in_range = FALSE;

    loh_alloc_since_cg = 0;

    new_heap_segment = NULL;

    gen0_allocated_after_gc_p = false;

#ifdef RECORD_LOH_STATE
    loh_state_index = 0;
#endif //RECORD_LOH_STATE
#endif //MULTIPLE_HEAPS

#ifdef MULTIPLE_HEAPS
    if (h_number > n_heaps)
    {
        assert (!"Number of heaps exceeded");
        return 0;
    }

    heap_number = h_number;
#endif //MULTIPLE_HEAPS

    memset (&oom_info, 0, sizeof (oom_info));
    memset (&fgm_result, 0, sizeof (fgm_result));
    if (!gc_done_event.CreateManualEventNoThrow(FALSE))
    {
        return 0;
    }
    gc_done_event_lock = -1;
    gc_done_event_set = false;

#ifndef SEG_MAPPING_TABLE
    if (!gc_heap::seg_table->ensure_space_for_insert ())
    {
        return 0;
    }
#endif //!SEG_MAPPING_TABLE

    heap_segment* seg = get_initial_segment (soh_segment_size, h_number);
    if (!seg)
        return 0;

    FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(seg),
                              (size_t)(heap_segment_reserved (seg) - heap_segment_mem(seg)),
                              gc_etw_segment_small_object_heap);

#ifdef SEG_MAPPING_TABLE
    seg_mapping_table_add_segment (seg, __this);
#else //SEG_MAPPING_TABLE
    seg_table->insert ((uint8_t*)seg, sdelta);
#endif //SEG_MAPPING_TABLE

#ifdef MULTIPLE_HEAPS
    heap_segment_heap (seg) = this;
#endif //MULTIPLE_HEAPS

    /* todo: Need a global lock for this */
    uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))];
    own_card_table (ct);
    card_table = translate_card_table (ct);
    /* End of global lock */

    brick_table = card_table_brick_table (ct);
    highest_address = card_table_highest_address (ct);
    lowest_address = card_table_lowest_address (ct);

#ifdef CARD_BUNDLE
    card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct), g_gc_lowest_address);
    assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] ==
            card_table_card_bundle_table (ct));
#endif //CARD_BUNDLE

#ifdef MARK_ARRAY
    if (gc_can_use_concurrent)
        mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))]));
    else
        mark_array = NULL;
#endif //MARK_ARRAY

    uint8_t*  start = heap_segment_mem (seg);

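    // Create the small object generations (gen max_generation..0) inside the initial
    // segment. Each generation's allocation start is separated from the previous one
    // by a min-object-sized gap, which is turned into an unused array further below.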
    for (int i = 0; i < 1 + max_generation; i++)
    {
        make_generation (generation_table [ (max_generation - i) ],
                         seg, start, 0);
        generation_table [(max_generation - i)].gen_num = max_generation - i;
        start += Align (min_obj_size);
    }

    heap_segment_allocated (seg) = start;
    alloc_allocated = start;
    heap_segment_used (seg) = start - plug_skew;

    ephemeral_heap_segment = seg;

#ifndef SEG_MAPPING_TABLE
    if (!gc_heap::seg_table->ensure_space_for_insert ())
    {
        return 0;
    }
#endif //!SEG_MAPPING_TABLE
    //Create the large segment generation
    heap_segment* lseg = get_initial_segment(min_loh_segment_size, h_number);
    if (!lseg)
        return 0;
    lseg->flags |= heap_segment_flags_loh;

    FIRE_EVENT(GCCreateSegment_V1, heap_segment_mem(lseg),
                              (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)),
                              gc_etw_segment_large_object_heap);

#ifdef SEG_MAPPING_TABLE
    seg_mapping_table_add_segment (lseg, __this);
#else //SEG_MAPPING_TABLE
    seg_table->insert ((uint8_t*)lseg, sdelta);
#endif //SEG_MAPPING_TABLE

    generation_table [max_generation].free_list_allocator = allocator(NUM_GEN2_ALIST, BASE_GEN2_ALIST, gen2_alloc_list);
    //assign the alloc_list for the large generation
    generation_table [max_generation+1].free_list_allocator = allocator(NUM_LOH_ALIST, BASE_LOH_ALIST, loh_alloc_list);
    generation_table [max_generation+1].gen_num = max_generation+1;
    make_generation (generation_table [max_generation+1],lseg, heap_segment_mem (lseg), 0);
    heap_segment_allocated (lseg) = heap_segment_mem (lseg) + Align (min_obj_size, get_alignment_constant (FALSE));
    heap_segment_used (lseg) = heap_segment_allocated (lseg) - plug_skew;

    for (int gen_num = 0; gen_num <= 1 + max_generation; gen_num++)
    {
        generation*  gen = generation_of (gen_num);
        make_unused_array (generation_allocation_start (gen), Align (min_obj_size));
    }

#ifdef MULTIPLE_HEAPS
    heap_segment_heap (lseg) = this;

    //initialize the alloc context heap for gen0
    generation_alloc_context (generation_of (0))->set_alloc_heap(vm_heap);

    //initialize the alloc context heap for the large object generation
    generation_alloc_context (generation_of (max_generation+1))->set_alloc_heap(vm_heap);

#endif //MULTIPLE_HEAPS

    //Do this only once
#ifdef MULTIPLE_HEAPS
    if (h_number == 0)
#endif //MULTIPLE_HEAPS
    {
#ifndef INTERIOR_POINTERS
        //set the brick_table for large objects,
        //but the default value is already cleared
        //clear_brick_table ((uint8_t*)heap_segment_mem (lseg),
        //                   (uint8_t*)heap_segment_reserved (lseg));

#else //INTERIOR_POINTERS

        //Because of the interior pointer business, we have to clear
        //the whole brick table
        //but the default value is cleared
        // clear_brick_table (lowest_address, highest_address);
#endif //INTERIOR_POINTERS
    }

    if (!init_dynamic_data())
    {
        return 0;
    }

    etw_allocation_running_amount[0] = 0;
    etw_allocation_running_amount[1] = 0;
    total_alloc_bytes_soh = 0;
    total_alloc_bytes_loh = 0;

    //needs to be done after the dynamic data has been initialized
#ifndef MULTIPLE_HEAPS
    allocation_running_amount = dd_min_size (dynamic_data_of (0));
#endif //!MULTIPLE_HEAPS

    fgn_maxgen_percent = 0;
    fgn_last_alloc = dd_min_size (dynamic_data_of (0));

    mark* arr = new (nothrow) (mark [MARK_STACK_INITIAL_LENGTH]);
    if (!arr)
        return 0;

    make_mark_stack(arr);

#ifdef BACKGROUND_GC
#ifdef BGC_SERVO_TUNING
    loh_a_no_bgc = 0;
    loh_a_bgc_marking = 0;
    loh_a_bgc_planning = 0;
    bgc_maxgen_end_fl_size = 0;
#endif //BGC_SERVO_TUNING
    freeable_small_heap_segment = 0;
    gchist_index_per_heap = 0;
    uint8_t** b_arr = new (nothrow) (uint8_t* [MARK_STACK_INITIAL_LENGTH]);
    if (!b_arr)
        return 0;

    make_background_mark_stack (b_arr);
#endif //BACKGROUND_GC

    ephemeral_low = generation_allocation_start(generation_of(max_generation - 1));
    ephemeral_high = heap_segment_reserved(ephemeral_heap_segment);
    if (heap_number == 0)
    {
        stomp_write_barrier_initialize(
#ifdef MULTIPLE_HEAPS
            reinterpret_cast<uint8_t*>(1), reinterpret_cast<uint8_t*>(~0)
#else
            ephemeral_low, ephemeral_high
#endif //!MULTIPLE_HEAPS
        );
    }

#ifdef MARK_ARRAY
    // why would we clear the mark array for this page? it should be cleared..
    // clear the first committed page
    //if(gc_can_use_concurrent)
    //{
    //    clear_mark_array (align_lower_page (heap_segment_mem (seg)), heap_segment_committed (seg));
    //}
#endif //MARK_ARRAY

#ifdef MULTIPLE_HEAPS
    get_proc_and_numa_for_heap (heap_number);
    if (!create_gc_thread ())
        return 0;

    g_heaps [heap_number] = this;

#endif //MULTIPLE_HEAPS

#ifdef FEATURE_PREMORTEM_FINALIZATION
    HRESULT hr = AllocateCFinalize(&finalize_queue);
    if (FAILED(hr))
        return 0;
#endif // FEATURE_PREMORTEM_FINALIZATION

    max_free_space_items = MAX_NUM_FREE_SPACES;

    bestfit_seg = new (nothrow) seg_free_spaces (heap_number);

    if (!bestfit_seg)
    {
        return 0;
    }

    if (!bestfit_seg->alloc())
    {
        return 0;
    }

    last_gc_before_oom = FALSE;

    sufficient_gen0_space_p = FALSE;

#ifdef MULTIPLE_HEAPS

#ifdef HEAP_ANALYZE

    heap_analyze_success = TRUE;

    internal_root_array  = 0;

    internal_root_array_index = 0;

    internal_root_array_length = initial_internal_roots;

    current_obj          = 0;

    current_obj_size     = 0;

#endif //HEAP_ANALYZE

#endif // MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
    bgc_thread_id.Clear();

    if (!create_bgc_thread_support())
    {
        return 0;
    }

    bgc_alloc_lock = new (nothrow) exclusive_sync;
    if (!bgc_alloc_lock)
    {
        return 0;
    }

    bgc_alloc_lock->init();

    if (h_number == 0)
    {
        if (!recursive_gc_sync::init())
            return 0;
    }

    bgc_thread_running = 0;
    bgc_thread = 0;
    bgc_threads_timeout_cs.Initialize();
    expanded_in_fgc = 0;
    current_bgc_state = bgc_not_in_process;
    background_soh_alloc_count = 0;
    background_loh_alloc_count = 0;
    bgc_overflow_count = 0;
    end_loh_size = dd_min_size (dynamic_data_of (max_generation + 1));
#endif //BACKGROUND_GC

#ifdef GC_CONFIG_DRIVEN
    memset (interesting_data_per_heap, 0, sizeof (interesting_data_per_heap));
    memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap));
    memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap));
    memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap));
#endif //GC_CONFIG_DRIVEN

    return 1;
}

void
gc_heap::destroy_semi_shared()
{
//TODO: will need to move this to per heap
//#ifdef BACKGROUND_GC
//    if (c_mark_list)
//        delete c_mark_list;
//#endif //BACKGROUND_GC

#ifdef MARK_LIST
    if (g_mark_list)
        delete g_mark_list;
#endif //MARK_LIST

#if defined(SEG_MAPPING_TABLE) && !defined(GROWABLE_SEG_MAPPING_TABLE)
    if (seg_mapping_table)
        delete seg_mapping_table;
#endif //SEG_MAPPING_TABLE && !GROWABLE_SEG_MAPPING_TABLE

#if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE)
    //destroy the segment map
    seg_table->delete_sorted_table();
#endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE
}

void
gc_heap::self_destroy()
{
#ifdef BACKGROUND_GC
    kill_gc_thread();
#endif //BACKGROUND_GC

    if (gc_done_event.IsValid())
    {
        gc_done_event.CloseEvent();
    }

    // destroy every segment.
    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));

    PREFIX_ASSUME(seg != NULL);

    heap_segment* next_seg;
    while (seg)
    {
        next_seg = heap_segment_next_rw (seg);
        delete_heap_segment (seg);
        seg = next_seg;
    }

    seg = heap_segment_rw (generation_start_segment (generation_of (max_generation+1)));

    PREFIX_ASSUME(seg != NULL);

    while (seg)
    {
        next_seg = heap_segment_next_rw (seg);
        delete_heap_segment (seg);
        seg = next_seg;
    }

    // get rid of the card table
    release_card_table (card_table);

    // destroy the mark stack
    delete mark_stack_array;

#ifdef FEATURE_PREMORTEM_FINALIZATION
    if (finalize_queue)
        delete finalize_queue;
#endif // FEATURE_PREMORTEM_FINALIZATION
}

void
gc_heap::destroy_gc_heap(gc_heap* heap)
{
    heap->self_destroy();
    delete heap;
}

// Destroys resources owned by gc. It is assumed that a last GC has been performed and that
// the finalizer queue has been drained.
void gc_heap::shutdown_gc()
{
    destroy_semi_shared();

#ifdef MULTIPLE_HEAPS
    //delete the heaps array
    delete g_heaps;
    destroy_thread_support();
    n_heaps = 0;
#endif //MULTIPLE_HEAPS
    //destroy seg_manager

    destroy_initial_memory();

    GCToOSInterface::Shutdown();
}

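// Checks whether an object of the given size fits between alloc_pointer and alloc_limit,
// accounting for an optional front padding object (SHORT_PLUGS), an optional tail padding
// object, and any alignment switch needed relative to the object's old location.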
inline
BOOL gc_heap::size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit,
                          uint8_t* old_loc, int use_padding)
{
    BOOL already_padded = FALSE;
#ifdef SHORT_PLUGS
    if ((old_loc != 0) && (use_padding & USE_PADDING_FRONT))
    {
        alloc_pointer = alloc_pointer + Align (min_obj_size);
        already_padded = TRUE;
    }
#endif //SHORT_PLUGS

    if (!((old_loc == 0) || same_large_alignment_p (old_loc, alloc_pointer)))
        size = size + switch_alignment_size (already_padded);

#ifdef FEATURE_STRUCTALIGN
    alloc_pointer = StructAlign(alloc_pointer, requiredAlignment, alignmentOffset);
#endif // FEATURE_STRUCTALIGN

    // in allocate_in_condemned_generation we can have this when we
    // set the alloc_limit to plan_allocated which could be less than
    // alloc_ptr
    if (alloc_limit < alloc_pointer)
    {
        return FALSE;
    }

    if (old_loc != 0)
    {
        return (((size_t)(alloc_limit - alloc_pointer) >= (size + ((use_padding & USE_PADDING_TAIL)? Align(min_obj_size) : 0)))
#ifdef SHORT_PLUGS
                ||((!(use_padding & USE_PADDING_FRONT)) && ((alloc_pointer + size) == alloc_limit))
#else //SHORT_PLUGS
                ||((alloc_pointer + size) == alloc_limit)
#endif //SHORT_PLUGS
            );
    }
    else
    {
        assert (size == Align (min_obj_size));
        return ((size_t)(alloc_limit - alloc_pointer) >= size);
    }
}

inline
BOOL gc_heap::a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit,
                            int align_const)
{
    // We could have run into cases where this is true when alloc_allocated is
    // the same as the seg committed.
    if (alloc_limit < alloc_pointer)
    {
        return FALSE;
    }

    return ((size_t)(alloc_limit - alloc_pointer) >= (size + Align(min_obj_size, align_const)));
}

// Grow by committing more pages
BOOL gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* high_address, bool* hard_limit_exceeded_p)
{
    assert (high_address <= heap_segment_reserved (seg));

    if (hard_limit_exceeded_p)
        *hard_limit_exceeded_p = false;

    //return 0 if we are at the end of the segment.
    if (align_on_page (high_address) > heap_segment_reserved (seg))
        return FALSE;

    if (high_address <= heap_segment_committed (seg))
        return TRUE;

    size_t c_size = align_on_page ((size_t)(high_address - heap_segment_committed (seg)));
    c_size = max (c_size, commit_min_th);
    c_size = min (c_size, (size_t)(heap_segment_reserved (seg) - heap_segment_committed (seg)));

    if (c_size == 0)
        return FALSE;

    STRESS_LOG2(LF_GC, LL_INFO10000,
                "Growing heap_segment: %Ix high address: %Ix\n",
                (size_t)seg, (size_t)high_address);

    bool ret = virtual_commit (heap_segment_committed (seg), c_size, heap_number, hard_limit_exceeded_p);
    if (ret)
    {
#ifdef MARK_ARRAY
#ifndef BACKGROUND_GC
        clear_mark_array (heap_segment_committed (seg),
                        heap_segment_committed (seg)+c_size, TRUE);
#endif //BACKGROUND_GC
#endif //MARK_ARRAY
        heap_segment_committed (seg) += c_size;

        STRESS_LOG1(LF_GC, LL_INFO10000, "New commit: %Ix\n",
                    (size_t)heap_segment_committed (seg));

        assert (heap_segment_committed (seg) <= heap_segment_reserved (seg));
        assert (high_address <= heap_segment_committed (seg));
    }

    return !!ret;
}

inline
int gc_heap::grow_heap_segment (heap_segment* seg, uint8_t* allocated, uint8_t* old_loc, size_t size, BOOL pad_front_p  REQD_ALIGN_AND_OFFSET_DCL)
{
#ifdef SHORT_PLUGS
    if ((old_loc != 0) && pad_front_p)
    {
        allocated = allocated + Align (min_obj_size);
    }
#endif //SHORT_PLUGS

    if (!((old_loc == 0) || same_large_alignment_p (old_loc, allocated)))
        size = size + switch_alignment_size (FALSE);
#ifdef FEATURE_STRUCTALIGN
    size_t pad = ComputeStructAlignPad(allocated, requiredAlignment, alignmentOffset);
    return grow_heap_segment (seg, allocated + pad + size);
#else // FEATURE_STRUCTALIGN
    return grow_heap_segment (seg, allocated + size);
#endif // FEATURE_STRUCTALIGN
}

//used only in older generation allocation (i.e. during gc).
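// If the region being handed out does not simply extend the current allocation region,
// the leftover space [allocation pointer, allocation limit) is converted into a free
// object and, when large enough, threaded onto the front of the generation's free list.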
void gc_heap::adjust_limit (uint8_t* start, size_t limit_size, generation* gen,
                            int gennum)
{
    UNREFERENCED_PARAMETER(gennum);
    dprintf (3, ("gc Expanding segment allocation"));
    heap_segment* seg = generation_allocation_segment (gen);
    if ((generation_allocation_limit (gen) != start) || (start != heap_segment_plan_allocated (seg)))
    {
        if (generation_allocation_limit (gen) == heap_segment_plan_allocated (seg))
        {
            assert (generation_allocation_pointer (gen) >= heap_segment_mem (seg));
            assert (generation_allocation_pointer (gen) <= heap_segment_committed (seg));
            heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
        }
        else
        {
            uint8_t*  hole = generation_allocation_pointer (gen);
            size_t  size = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));

            if (size != 0)
            {
                dprintf (3, ("filling up hole: %Ix, size %Ix", hole, size));
                size_t allocated_size = generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen);
                if (size >= Align (min_free_list))
                {
                    if (allocated_size < min_free_list)
                    {
                        if (size >= (Align (min_free_list) + Align (min_obj_size)))
                        {
                            //split hole into min obj + threadable free item
                            make_unused_array (hole, min_obj_size);
                            generation_free_obj_space (gen) += Align (min_obj_size);
                            make_unused_array (hole + Align (min_obj_size), size - Align (min_obj_size));
                            generation_free_list_space (gen) += size - Align (min_obj_size);
                            generation_allocator(gen)->thread_item_front (hole + Align (min_obj_size),
                                                                          size - Align (min_obj_size));
                            add_gen_free (gen->gen_num, (size - Align (min_obj_size)));
                        }
                        else
                        {
                            dprintf (3, ("allocated size too small, can't put back rest on free list %Ix", allocated_size));
                            make_unused_array (hole, size);
                            generation_free_obj_space (gen) += size;
                        }
                    }
                    else
                    {
                        dprintf (3, ("threading hole in front of free list"));
                        make_unused_array (hole, size);
                        generation_free_list_space (gen) += size;
                        generation_allocator(gen)->thread_item_front (hole, size);
                        add_gen_free (gen->gen_num, size);
                    }
                }
                else
                {
                    make_unused_array (hole, size);
                    generation_free_obj_space (gen) += size;
                }
            }
        }
        generation_allocation_pointer (gen) = start;
        generation_allocation_context_start_region (gen) = start;
    }
    generation_allocation_limit (gen) = (start + limit_size);
}

void verify_mem_cleared (uint8_t* start, size_t size)
{
    if (!Aligned (size))
    {
        FATAL_GC_ERROR();
    }

    PTR_PTR curr_ptr = (PTR_PTR) start;
    for (size_t i = 0; i < size / sizeof(PTR_PTR); i++)
    {
        if (*(curr_ptr++) != 0)
        {
            FATAL_GC_ERROR();
        }
    }
}

#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
void gc_heap::set_batch_mark_array_bits (uint8_t* start, uint8_t* end)
{
    size_t start_mark_bit = mark_bit_of (start);
    size_t end_mark_bit = mark_bit_of (end);
    unsigned int startbit = mark_bit_bit (start_mark_bit);
    unsigned int endbit = mark_bit_bit (end_mark_bit);
    size_t startwrd = mark_bit_word (start_mark_bit);
    size_t endwrd = mark_bit_word (end_mark_bit);

    dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
        (size_t)start, (size_t)start_mark_bit,
        (size_t)end, (size_t)end_mark_bit));

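    // firstwrd keeps the bits from startbit up to the top of the word;
    // lastwrd keeps the bits below endbit.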
    unsigned int firstwrd = ~(lowbits (~0, startbit));
    unsigned int lastwrd = ~(highbits (~0, endbit));

    if (startwrd == endwrd)
    {
        unsigned int wrd = firstwrd & lastwrd;
        mark_array[startwrd] |= wrd;
        return;
    }

    // set the first mark word.
    if (startbit)
    {
        mark_array[startwrd] |= firstwrd;
        startwrd++;
    }

    for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
    {
        mark_array[wrdtmp] = ~(unsigned int)0;
    }

    // set the last mark word.
    if (endbit)
    {
        mark_array[endwrd] |= lastwrd;
    }
}

// makes sure that the mark array bits between start and end are 0.
void gc_heap::check_batch_mark_array_bits (uint8_t* start, uint8_t* end)
{
    size_t start_mark_bit = mark_bit_of (start);
    size_t end_mark_bit = mark_bit_of (end);
    unsigned int startbit = mark_bit_bit (start_mark_bit);
    unsigned int endbit = mark_bit_bit (end_mark_bit);
    size_t startwrd = mark_bit_word (start_mark_bit);
    size_t endwrd = mark_bit_word (end_mark_bit);

    //dprintf (3, ("Setting all mark array bits between [%Ix:%Ix-[%Ix:%Ix",
    //    (size_t)start, (size_t)start_mark_bit,
    //    (size_t)end, (size_t)end_mark_bit));

    unsigned int firstwrd = ~(lowbits (~0, startbit));
    unsigned int lastwrd = ~(highbits (~0, endbit));

    if (startwrd == endwrd)
    {
        unsigned int wrd = firstwrd & lastwrd;
        if (mark_array[startwrd] & wrd)
        {
            dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            wrd, startwrd,
                            mark_array [startwrd], mark_word_address (startwrd)));
            FATAL_GC_ERROR();
        }
        return;
    }

    // check the first mark word.
    if (startbit)
    {
        if (mark_array[startwrd] & firstwrd)
        {
            dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            firstwrd, startwrd,
                            mark_array [startwrd], mark_word_address (startwrd)));
            FATAL_GC_ERROR();
        }

        startwrd++;
    }

    for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
    {
        if (mark_array[wrdtmp])
        {
            dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            wrdtmp,
                            mark_array [wrdtmp], mark_word_address (wrdtmp)));
            FATAL_GC_ERROR();
        }
    }

    // check the last mark word.
    if (endbit)
    {
        if (mark_array[endwrd] & lastwrd)
        {
            dprintf  (3, ("The %Ix portion of mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            lastwrd, endwrd,
                            mark_array [endwrd], mark_word_address (endwrd)));
            FATAL_GC_ERROR();
        }
    }
}
#endif //VERIFY_HEAP && BACKGROUND_GC

allocator::allocator (unsigned int num_b, size_t fbs, alloc_list* b)
{
    assert (num_b < MAX_BUCKET_COUNT);
    num_buckets = num_b;
    frst_bucket_size = fbs;
    buckets = b;
}

alloc_list& allocator::alloc_list_of (unsigned int bn)
{
    assert (bn < num_buckets);
    if (bn == 0)
        return first_bucket;
    else
        return buckets [bn-1];
}

size_t& allocator::alloc_list_damage_count_of (unsigned int bn)
{
    assert (bn < num_buckets);
    if (bn == 0)
        return first_bucket.alloc_list_damage_count();
    else
        return buckets [bn-1].alloc_list_damage_count();
}

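// Removes `item` (whose predecessor in the bucket is `prev_item`) from bucket `bn`.
// When use_undo_p is set and a predecessor exists, the unlink is recorded in the
// predecessor's undo slot and the bucket's damage count is bumped, so the list can later
// be repaired (copy_from_alloc_list) or the undo info dropped (commit_alloc_list_changes).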
void allocator::unlink_item (unsigned int bn, uint8_t* item, uint8_t* prev_item, BOOL use_undo_p)
{
    //unlink the free_item
    alloc_list* al = &alloc_list_of (bn);
    if (prev_item)
    {
        if (use_undo_p && (free_list_undo (prev_item) == UNDO_EMPTY))
        {
            assert (item == free_list_slot (prev_item));
            free_list_undo (prev_item) = item;
            alloc_list_damage_count_of (bn)++;
        }
        free_list_slot (prev_item) = free_list_slot(item);
    }
    else
    {
        al->alloc_list_head() = (uint8_t*)free_list_slot(item);
    }
    if (al->alloc_list_tail() == item)
    {
        al->alloc_list_tail() = prev_item;
    }
}

void allocator::clear()
{
    for (unsigned int i = 0; i < num_buckets; i++)
    {
        alloc_list_head_of (i) = 0;
        alloc_list_tail_of (i) = 0;
    }
}

//always thread to the end.
void allocator::thread_free_item (uint8_t* item, uint8_t*& head, uint8_t*& tail)
{
    free_list_slot (item) = 0;
    free_list_undo (item) = UNDO_EMPTY;
    assert (item != head);

    if (head == 0)
    {
       head = item;
    }
    //TODO: This shouldn't happen anymore - verify that's the case.
    //the following is necessary because the last free element
    //may have been truncated, and tail isn't updated.
    else if (free_list_slot (head) == 0)
    {
        free_list_slot (head) = item;
    }
    else
    {
        assert (item != tail);
        assert (free_list_slot(tail) == 0);
        free_list_slot (tail) = item;
    }
    tail = item;
}

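// Buckets hold free items by size class: bucket 0 covers sizes below frst_bucket_size
// and each subsequent bucket doubles the bound; the last bucket takes everything larger.
// thread_item appends to the tail of the selected bucket, thread_item_front pushes onto its head.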
void allocator::thread_item (uint8_t* item, size_t size)
{
    size_t sz = frst_bucket_size;
    unsigned int a_l_number = 0;

    for (; a_l_number < (num_buckets-1); a_l_number++)
    {
        if (size < sz)
        {
            break;
        }
        sz = sz * 2;
    }
    alloc_list* al = &alloc_list_of (a_l_number);
    thread_free_item (item,
                      al->alloc_list_head(),
                      al->alloc_list_tail());
}

void allocator::thread_item_front (uint8_t* item, size_t size)
{
    //find right free list
    size_t sz = frst_bucket_size;
    unsigned int a_l_number = 0;
    for (; a_l_number < (num_buckets-1); a_l_number++)
    {
        if (size < sz)
        {
            break;
        }
        sz = sz * 2;
    }
    alloc_list* al = &alloc_list_of (a_l_number);
    free_list_slot (item) = al->alloc_list_head();
    free_list_undo (item) = UNDO_EMPTY;

    if (al->alloc_list_tail() == 0)
    {
        al->alloc_list_tail() = al->alloc_list_head();
    }
    al->alloc_list_head() = item;
    if (al->alloc_list_tail() == 0)
    {
        al->alloc_list_tail() = item;
    }
}

void allocator::copy_to_alloc_list (alloc_list* toalist)
{
    for (unsigned int i = 0; i < num_buckets; i++)
    {
        toalist [i] = alloc_list_of (i);
#ifdef FL_VERIFICATION
        uint8_t* free_item = alloc_list_head_of (i);
        size_t count = 0;
        while (free_item)
        {
            count++;
            free_item = free_list_slot (free_item);
        }

        toalist[i].item_count = count;
#endif //FL_VERIFICATION
    }
}

void allocator::copy_from_alloc_list (alloc_list* fromalist)
{
    BOOL repair_list = !discard_if_no_fit_p ();
    for (unsigned int i = 0; i < num_buckets; i++)
    {
        size_t count = alloc_list_damage_count_of (i);
        alloc_list_of (i) = fromalist [i];
        assert (alloc_list_damage_count_of (i) == 0);

        if (repair_list)
        {
            //repair the list
            //new items may have been added during the plan phase
            //items may have been unlinked.
            uint8_t* free_item = alloc_list_head_of (i);
            while (free_item && count)
            {
                assert (((CObjectHeader*)free_item)->IsFree());
                if ((free_list_undo (free_item) != UNDO_EMPTY))
                {
                    count--;
                    free_list_slot (free_item) = free_list_undo (free_item);
                    free_list_undo (free_item) = UNDO_EMPTY;
                }

                free_item = free_list_slot (free_item);
            }

#ifdef FL_VERIFICATION
            free_item = alloc_list_head_of (i);
            size_t item_count = 0;
            while (free_item)
            {
                item_count++;
                free_item = free_list_slot (free_item);
            }

            assert (item_count == alloc_list_of (i).item_count);
#endif //FL_VERIFICATION
        }
#ifdef DEBUG
        uint8_t* tail_item = alloc_list_tail_of (i);
        assert ((tail_item == 0) || (free_list_slot (tail_item) == 0));
#endif
    }
}

void allocator::commit_alloc_list_changes()
{
    BOOL repair_list = !discard_if_no_fit_p ();
    if (repair_list)
    {
        for (unsigned int i = 0; i < num_buckets; i++)
        {
            //remove the undo info from list.
            uint8_t* free_item = alloc_list_head_of (i);
            size_t count = alloc_list_damage_count_of (i);
            while (free_item && count)
            {
                assert (((CObjectHeader*)free_item)->IsFree());

                if (free_list_undo (free_item) != UNDO_EMPTY)
                {
                    free_list_undo (free_item) = UNDO_EMPTY;
                    count--;
                }

                free_item = free_list_slot (free_item);
            }

            alloc_list_damage_count_of (i) = 0;
        }
    }
}

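// Hands the range [start, start + limit_size) to the given alloc_context: any leftover
// space in the old context is turned into a free object, the context's pointers and
// allocation byte counts are updated, and the newly exposed memory is cleared (outside
// the more space lock) unless the allocation requested optional zeroing. For gen0 on the
// ephemeral segment the brick table may also be updated to keep find_object fast.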
void gc_heap::adjust_limit_clr (uint8_t* start, size_t limit_size, size_t size,
                                alloc_context* acontext, uint32_t flags,
                                heap_segment* seg, int align_const, int gen_number)
{
    bool loh_p = (gen_number > 0);
    GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
    uint64_t& total_alloc_bytes = loh_p ? total_alloc_bytes_loh : total_alloc_bytes_soh;

    size_t aligned_min_obj_size = Align(min_obj_size, align_const);

    if (seg)
    {
        assert (heap_segment_used (seg) <= heap_segment_committed (seg));
    }

#ifdef MULTIPLE_HEAPS
    if (gen_number == 0)
    {
        if (!gen0_allocated_after_gc_p)
        {
            gen0_allocated_after_gc_p = true;
        }
    }
#endif //MULTIPLE_HEAPS

    dprintf (3, ("Expanding segment allocation [%Ix, %Ix[", (size_t)start,
               (size_t)start + limit_size - aligned_min_obj_size));

    if ((acontext->alloc_limit != start) &&
        (acontext->alloc_limit + aligned_min_obj_size)!= start)
    {
        uint8_t*  hole = acontext->alloc_ptr;
        if (hole != 0)
        {
            size_t  ac_size = (acontext->alloc_limit - acontext->alloc_ptr);
            dprintf (3, ("filling up hole [%Ix, %Ix[", (size_t)hole, (size_t)hole + ac_size + Align (min_obj_size, align_const)));
            // when we are finishing an allocation from a free list
            // we know that the free area was Align(min_obj_size) larger
            acontext->alloc_bytes -= ac_size;
            total_alloc_bytes -= ac_size;
            size_t free_obj_size = ac_size + aligned_min_obj_size;
            make_unused_array (hole, free_obj_size);
            generation_free_obj_space (generation_of (gen_number)) += free_obj_size;
        }
        acontext->alloc_ptr = start;
    }
    else
    {
        if (gen_number == 0)
        {
            size_t pad_size = Align (min_obj_size, align_const);
            dprintf (3, ("contiguous ac: making min obj gap %Ix->%Ix(%Id)",
                acontext->alloc_ptr, (acontext->alloc_ptr + pad_size), pad_size));
            make_unused_array (acontext->alloc_ptr, pad_size);
            acontext->alloc_ptr += pad_size;
        }
    }
    acontext->alloc_limit = (start + limit_size - aligned_min_obj_size);
    size_t added_bytes = limit_size - ((gen_number < max_generation + 1) ? aligned_min_obj_size : 0);
    acontext->alloc_bytes += added_bytes;
    total_alloc_bytes     += added_bytes;

    uint8_t* saved_used = 0;

    if (seg)
    {
        saved_used = heap_segment_used (seg);
    }

    if (seg == ephemeral_heap_segment)
    {
        //Sometimes the allocated size is advanced without clearing the
        //memory. Let's catch up here
        if (heap_segment_used (seg) < (alloc_allocated - plug_skew))
        {
#ifdef MARK_ARRAY
#ifndef BACKGROUND_GC
            clear_mark_array (heap_segment_used (seg) + plug_skew, alloc_allocated);
#endif //BACKGROUND_GC
#endif //MARK_ARRAY
            heap_segment_used (seg) = alloc_allocated - plug_skew;
        }
    }
#ifdef BACKGROUND_GC
    else if (seg)
    {
        uint8_t* old_allocated = heap_segment_allocated (seg) - plug_skew - limit_size;
#ifdef FEATURE_LOH_COMPACTION
        old_allocated -= Align (loh_padding_obj_size, align_const);
#endif //FEATURE_LOH_COMPACTION

        assert (heap_segment_used (seg) >= old_allocated);
    }
#endif //BACKGROUND_GC

    // we are going to clear a right-edge exclusive span [clear_start, clear_limit),
    // but will adjust for cases when the object is ok to stay dirty or the space has not seen any use yet.
    // NB: the size and limit_size include the sync block, which sits at -1 relative to the
    //     object start, so the allocation is effectively shifted by `plug_skew`.
    uint8_t* clear_start = start - plug_skew;
    uint8_t* clear_limit = start + limit_size - plug_skew;

    if (flags & GC_ALLOC_ZEROING_OPTIONAL)
    {
        uint8_t* obj_start = acontext->alloc_ptr;
        assert(start >= obj_start);
        uint8_t* obj_end = obj_start + size - plug_skew;
        assert(obj_end > clear_start);

        // if clearing at the object start, clear the syncblock.
        if(obj_start == start)
        {
            *(PTR_PTR)clear_start = 0;
        }
        // skip the rest of the object
        dprintf(3, ("zeroing optional: skipping object at %Ix->%Ix(%Id)", clear_start, obj_end, obj_end - clear_start));
        clear_start = obj_end;
    }

    // check if space to clear is all dirty from prior use or only partially
    if ((seg == 0) || (clear_limit <= heap_segment_used (seg)))
    {
        add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
        leave_spin_lock (msl);

        if (clear_start < clear_limit)
        {
            dprintf(3, ("clearing memory at %Ix for %d bytes", clear_start, clear_limit - clear_start));
            memclr(clear_start, clear_limit - clear_start);
        }
    }
    else
    {
        // we only need to clear [clear_start, used) and only if clear_start < used
        uint8_t* used = heap_segment_used (seg);
        heap_segment_used (seg) = clear_limit;

        add_saved_spinlock_info (loh_p, me_release, mt_clr_mem);
        leave_spin_lock (msl);

        if (clear_start < used)
        {
            if (used != saved_used)
            {
                FATAL_GC_ERROR ();
            }

            dprintf (2, ("clearing memory before used at %Ix for %Id bytes", clear_start, used - clear_start));
            memclr (clear_start, used - clear_start);
        }
    }

    //this portion can be done after we release the lock
    if (seg == ephemeral_heap_segment ||
       ((seg == nullptr) && (gen_number == 0) && (limit_size >= CLR_SIZE / 2)))
    {
        if (gen0_must_clear_bricks > 0)
        {
            //set the brick table to speed up find_object
            size_t b = brick_of (acontext->alloc_ptr);
            set_brick (b, acontext->alloc_ptr - brick_address (b));
            b++;
            dprintf (3, ("Allocation Clearing bricks [%Ix, %Ix[",
                         b, brick_of (align_on_brick (start + limit_size))));
            volatile short* x = &brick_table [b];
            short* end_x = &brick_table [brick_of (align_on_brick (start + limit_size))];

            for (;x < end_x;x++)
                *x = -1;
        }
        else
        {
            gen0_bricks_cleared = FALSE;
        }
    }

    // verifying the memory is completely cleared.
    //if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
    //{
    //    verify_mem_cleared(start - plug_skew, limit_size);
    //}
}

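// Returns how much of this generation's remaining allocation budget to hand out:
// the budget (dd_new_allocation) clamped to at least `size` and at most `physical_limit`.
// The returned amount is charged against dd_new_allocation so the budget shrinks accordingly.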
size_t gc_heap::new_allocation_limit (size_t size, size_t physical_limit, int gen_number)
{
    dynamic_data* dd = dynamic_data_of (gen_number);
    ptrdiff_t new_alloc = dd_new_allocation (dd);
    assert (new_alloc == (ptrdiff_t)Align (new_alloc,
        get_alignment_constant (!(gen_number == (max_generation + 1)))));

    ptrdiff_t logical_limit = max (new_alloc, (ptrdiff_t)size);
    size_t limit = min (logical_limit, (ptrdiff_t)physical_limit);
    assert (limit == Align (limit, get_alignment_constant (!(gen_number == (max_generation+1)))));
    dd_new_allocation (dd) = (new_alloc - limit);

    return limit;
}

size_t gc_heap::limit_from_size (size_t size, uint32_t flags, size_t physical_limit, int gen_number,
                                 int align_const)
{
    size_t padded_size = size + Align (min_obj_size, align_const);
    // for LOH this is not true...we could select a physical_limit that's exactly the same
    // as size.
    assert ((gen_number != 0) || (physical_limit >= padded_size));

    // For SOH if the size asked for is very small, we want to allocate more than just what's asked for if possible.
    // Unless we were told zeroing is optional, in which case we will not force it.
    size_t min_size_to_allocate = ((gen_number == 0 && !(flags & GC_ALLOC_ZEROING_OPTIONAL)) ? allocation_quantum : 0);

    size_t desired_size_to_allocate  = max (padded_size, min_size_to_allocate);
    size_t new_physical_limit = min (physical_limit, desired_size_to_allocate);

    size_t new_limit = new_allocation_limit (padded_size,
                                             new_physical_limit,
                                             gen_number);
    assert (new_limit >= (size + Align (min_obj_size, align_const)));
    dprintf (100, ("requested to allocate %Id bytes, actual size is %Id", size, new_limit));
    return new_limit;
}

void gc_heap::add_to_oom_history_per_heap()
{
    oom_history* current_hist = &oomhist_per_heap[oomhist_index_per_heap];
    memcpy (current_hist, &oom_info, sizeof (oom_info));
    oomhist_index_per_heap++;
    if (oomhist_index_per_heap == max_oom_history_count)
    {
        oomhist_index_per_heap = 0;
    }
}

void gc_heap::handle_oom (int heap_num, oom_reason reason, size_t alloc_size,
                          uint8_t* allocated, uint8_t* reserved)
{
    UNREFERENCED_PARAMETER(heap_num);

    if (reason == oom_budget)
    {
        alloc_size = dd_min_size (dynamic_data_of (0)) / 2;
    }

    if ((reason == oom_budget) && ((!fgm_result.loh_p) && (fgm_result.fgm != fgm_no_failure)))
    {
        // This means during the last GC we needed to reserve and/or commit more memory
        // but we couldn't. We proceeded with the GC and ended up not having enough
        // memory at the end. This is a legitimate OOM situtation. Otherwise we
        // probably made a mistake and didn't expand the heap when we should have.
        reason = oom_low_mem;
    }

    oom_info.reason = reason;
    oom_info.allocated = allocated;
    oom_info.reserved = reserved;
    oom_info.alloc_size = alloc_size;
    oom_info.gc_index = settings.gc_index;
    oom_info.fgm = fgm_result.fgm;
    oom_info.size = fgm_result.size;
    oom_info.available_pagefile_mb = fgm_result.available_pagefile_mb;
    oom_info.loh_p = fgm_result.loh_p;

    add_to_oom_history_per_heap();
    fgm_result.fgm = fgm_no_failure;

    // Break early - before the more_space_lock is released, so no other threads
    // could have allocated on the same heap when the OOM happened.
    if (GCConfig::GetBreakOnOOM())
    {
        GCToOSInterface::DebugBreak();
    }
}

#ifdef BACKGROUND_GC
BOOL gc_heap::background_allowed_p()
{
    return ( gc_can_use_concurrent && ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)) );
}
#endif //BACKGROUND_GC

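// Full GC notification (FGN): called on the allocation path to detect that a blocking
// full GC is approaching, either because the registered gen2/LOH budget threshold has
// been crossed or because generation_to_condemn estimates a blocking gen2 collection.
// When detected, full_gc_approach_event is set via send_full_gc_notification.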
void gc_heap::check_for_full_gc (int gen_num, size_t size)
{
    BOOL should_notify = FALSE;
    // alloc_factor is TRUE if we detect the full GC because the specified allocation
    // budget was exceeded; it's FALSE if it's due to other factors.
    BOOL alloc_factor = TRUE;
    int i = 0;
    int n = 0;
    int n_initial = gen_num;
    BOOL local_blocking_collection = FALSE;
    BOOL local_elevation_requested = FALSE;
    int new_alloc_remain_percent = 0;

    if (full_gc_approach_event_set)
    {
        return;
    }

    if (gen_num != (max_generation + 1))
    {
        gen_num = max_generation;
    }

    dynamic_data* dd_full = dynamic_data_of (gen_num);
    ptrdiff_t new_alloc_remain = 0;
    uint32_t pct = ((gen_num == (max_generation + 1)) ? fgn_loh_percent : fgn_maxgen_percent);

    for (int gen_index = 0; gen_index <= (max_generation + 1); gen_index++)
    {
        dprintf (2, ("FGN: h#%d: gen%d: %Id(%Id)",
                     heap_number, gen_index,
                     dd_new_allocation (dynamic_data_of (gen_index)),
                     dd_desired_allocation (dynamic_data_of (gen_index))));
    }

    // For small object allocations we only check every fgn_check_quantum bytes.
    if (n_initial == 0)
    {
        dprintf (2, ("FGN: gen0 last recorded alloc: %Id", fgn_last_alloc));
        dynamic_data* dd_0 = dynamic_data_of (n_initial);
        if (((fgn_last_alloc - dd_new_allocation (dd_0)) < fgn_check_quantum) &&
            (dd_new_allocation (dd_0) >= 0))
        {
            return;
        }
        else
        {
            fgn_last_alloc = dd_new_allocation (dd_0);
            dprintf (2, ("FGN: gen0 last recorded alloc is now: %Id", fgn_last_alloc));
        }

        // We don't consider the size that came from soh because it doesn't contribute to the
        // gen2 budget.
        size = 0;
    }

    for (i = n+1; i <= max_generation; i++)
    {
        if (get_new_allocation (i) <= 0)
        {
            n = min (i, max_generation);
        }
        else
            break;
    }

    dprintf (2, ("FGN: h#%d: gen%d budget exceeded", heap_number, n));
    if (gen_num == max_generation)
    {
        // If it's the small object heap we should first see if we will even be looking at the gen2 budget
        // in the next GC or not. If not we should go directly to checking other factors.
        if (n < (max_generation - 1))
        {
            goto check_other_factors;
        }
    }

    new_alloc_remain = dd_new_allocation (dd_full) - size;

    new_alloc_remain_percent = (int)(((float)(new_alloc_remain) / (float)dd_desired_allocation (dd_full)) * 100);

    dprintf (2, ("FGN: alloc threshold for gen%d is %d%%, current threshold is %d%%",
                 gen_num, pct, new_alloc_remain_percent));

    if (new_alloc_remain_percent <= (int)pct)
    {
#ifdef BACKGROUND_GC
        // If background GC is enabled, we still want to check whether this will
        // be a blocking GC or not because we only want to notify when it's a
        // blocking full GC.
        if (background_allowed_p())
        {
            goto check_other_factors;
        }
#endif //BACKGROUND_GC

        should_notify = TRUE;
        goto done;
    }

check_other_factors:

    dprintf (2, ("FGC: checking other factors"));
    n = generation_to_condemn (n,
                               &local_blocking_collection,
                               &local_elevation_requested,
                               TRUE);

    if (local_elevation_requested && (n == max_generation))
    {
        if (settings.should_lock_elevation)
        {
            int local_elevation_locked_count = settings.elevation_locked_count + 1;
            if (local_elevation_locked_count != 6)
            {
                dprintf (2, ("FGN: lock count is %d - Condemning max_generation-1",
                    local_elevation_locked_count));
                n = max_generation - 1;
            }
        }
    }

    dprintf (2, ("FGN: we estimate gen%d will be collected", n));

#ifdef BACKGROUND_GC
    // When background GC is enabled it decreases the accuracy of our prediction -
    // by the time the GC happens, we may not be under BGC anymore. If we try to
    // predict often enough it should be ok.
    if ((n == max_generation) &&
        (recursive_gc_sync::background_running_p()))
    {
        n = max_generation - 1;
        dprintf (2, ("FGN: bgc - 1 instead of 2"));
    }

    if ((n == max_generation) && !local_blocking_collection)
    {
        if (!background_allowed_p())
        {
            local_blocking_collection = TRUE;
        }
    }
#endif //BACKGROUND_GC

    dprintf (2, ("FGN: we estimate gen%d will be collected: %s",
                       n,
                       (local_blocking_collection ? "blocking" : "background")));

    if ((n == max_generation) && local_blocking_collection)
    {
        alloc_factor = FALSE;
        should_notify = TRUE;
        goto done;
    }

done:

    if (should_notify)
    {
        dprintf (2, ("FGN: gen%d detecting full GC approaching(%s) (GC#%d) (%Id%% left in gen%d)",
                     n_initial,
                     (alloc_factor ? "alloc" : "other"),
                     dd_collection_count (dynamic_data_of (0)),
                     new_alloc_remain_percent,
                     gen_num));

        send_full_gc_notification (n_initial, alloc_factor);
    }
}

void gc_heap::send_full_gc_notification (int gen_num, BOOL due_to_alloc_p)
{
    if (!full_gc_approach_event_set)
    {
        assert (full_gc_approach_event.IsValid());
        FIRE_EVENT(GCFullNotify_V1, gen_num, due_to_alloc_p);

        full_gc_end_event.Reset();
        full_gc_approach_event.Set();
        full_gc_approach_event_set = true;
    }
}

wait_full_gc_status gc_heap::full_gc_wait (GCEvent *event, int time_out_ms)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    if (hp->fgn_maxgen_percent == 0)
    {
        return wait_full_gc_na;
    }

    uint32_t wait_result = user_thread_wait(event, FALSE, time_out_ms);

    if ((wait_result == WAIT_OBJECT_0) || (wait_result == WAIT_TIMEOUT))
    {
        if (hp->fgn_maxgen_percent == 0)
        {
            return wait_full_gc_cancelled;
        }

        if (wait_result == WAIT_OBJECT_0)
        {
#ifdef BACKGROUND_GC
            if (fgn_last_gc_was_concurrent)
            {
                fgn_last_gc_was_concurrent = FALSE;
                return wait_full_gc_na;
            }
            else
#endif //BACKGROUND_GC
            {
                return wait_full_gc_success;
            }
        }
        else
        {
            return wait_full_gc_timeout;
        }
    }
    else
    {
        return wait_full_gc_failed;
    }
}

size_t gc_heap::get_full_compact_gc_count()
{
    return full_gc_counts[gc_type_compacting];
}

// DTREVIEW - we should check this in dt_low_ephemeral_space_p
// as well.
inline
BOOL gc_heap::short_on_end_of_seg (int gen_number,
                                   heap_segment* seg,
                                   int align_const)
{
    UNREFERENCED_PARAMETER(gen_number);
    uint8_t* allocated = heap_segment_allocated(seg);

    BOOL sufficient_p = sufficient_space_end_seg (allocated,
                                                  heap_segment_reserved (seg),
                                                  end_space_after_gc(),
                                                  tuning_deciding_short_on_seg);
    if (!sufficient_p)
    {
        if (sufficient_gen0_space_p)
        {
            dprintf (GTC_LOG, ("gen0 has enough free space"));
        }

        sufficient_p = sufficient_gen0_space_p;
    }

    return !sufficient_p;
}

#ifdef _MSC_VER
#pragma warning(disable:4706) // "assignment within conditional expression" is intentional in this function.
#endif // _MSC_VER

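// Tries to satisfy the allocation from the generation's free list: walk the buckets large
// enough for `size`, unlink the first item that fits (with room for a trailing free object),
// thread any usable remainder back onto the free list, and set up the alloc context over
// the item via adjust_limit_clr.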
inline
BOOL gc_heap::a_fit_free_list_p (int gen_number,
                                 size_t size,
                                 alloc_context* acontext,
                                 uint32_t flags,
                                 int align_const)
{
    BOOL can_fit = FALSE;
    generation* gen = generation_of (gen_number);
    allocator* gen_allocator = generation_allocator (gen);
    size_t sz_list = gen_allocator->first_bucket_size();
    for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
    {
        if ((size < sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
        {
            uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
            uint8_t* prev_free_item = 0;

            while (free_list != 0)
            {
                dprintf (3, ("considering free list %Ix", (size_t)free_list));
                size_t free_list_size = unused_array_size (free_list);
                if ((size + Align (min_obj_size, align_const)) <= free_list_size)
                {
                    dprintf (3, ("Found adequate unused area: [%Ix, size: %Id",
                                 (size_t)free_list, free_list_size));

                    gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
                    // We ask for Align (min_obj_size) more than the requested size
                    // to make sure that we can insert a free object;
                    // adjust_limit will set the limit lower.
                    size_t limit = limit_from_size (size, flags, free_list_size, gen_number, align_const);

                    uint8_t*  remain = (free_list + limit);
                    size_t remain_size = (free_list_size - limit);
                    if (remain_size >= Align(min_free_list, align_const))
                    {
                        make_unused_array (remain, remain_size);
                        gen_allocator->thread_item_front (remain, remain_size);
                        assert (remain_size >= Align (min_obj_size, align_const));
                    }
                    else
                    {
                        //absorb the entire free list
                        limit += remain_size;
                    }
                    generation_free_list_space (gen) -= limit;

                    adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);

                    can_fit = TRUE;
                    goto end;
                }
                else if (gen_allocator->discard_if_no_fit_p())
                {
                    assert (prev_free_item == 0);
                    dprintf (3, ("couldn't use this free area, discarding"));
                    generation_free_obj_space (gen) += free_list_size;

                    gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
                    generation_free_list_space (gen) -= free_list_size;
                }
                else
                {
                    prev_free_item = free_list;
                }
                free_list = free_list_slot (free_list);
            }
        }
        sz_list = sz_list * 2;
    }
end:
    return can_fit;
}


#ifdef BACKGROUND_GC
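// Sets up a LOH allocation that was made while a BGC is in progress. The space is first made
// into a free object so the region stays walkable while the object has not been constructed
// yet; the BGC alloc lock slot and the LOH more space lock are then released so the
// (potentially large) clear can happen without holding them. When check_used_p is set we only
// clear up to heap_segment_used, since memory beyond it is already zero (unless heap
// verification filled it with 0xcc, in which case we clear the whole range). Finally the alloc
// context is pointed at the new space.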
void gc_heap::bgc_loh_alloc_clr (uint8_t* alloc_start,
                                 size_t size,
                                 alloc_context* acontext,
                                 uint32_t flags,
                                 int align_const,
                                 int lock_index,
                                 BOOL check_used_p,
                                 heap_segment* seg)
{
    make_unused_array (alloc_start, size);

    size_t size_of_array_base = sizeof(ArrayBase);

    bgc_alloc_lock->loh_alloc_done_with_index (lock_index);

    // clear memory while not holding the lock.
    size_t size_to_skip = size_of_array_base;
    size_t size_to_clear = size - size_to_skip - plug_skew;
    size_t saved_size_to_clear = size_to_clear;
    if (check_used_p)
    {
        uint8_t* end = alloc_start + size - plug_skew;
        uint8_t* used = heap_segment_used (seg);
        if (used < end)
        {
            if ((alloc_start + size_to_skip) < used)
            {
                size_to_clear = used - (alloc_start + size_to_skip);
            }
            else
            {
                size_to_clear = 0;
            }
            dprintf (2, ("bgc loh: setting used to %Ix", end));
            heap_segment_used (seg) = end;
        }

        dprintf (2, ("bgc loh: used: %Ix, alloc: %Ix, end of alloc: %Ix, clear %Id bytes",
                     used, alloc_start, end, size_to_clear));
    }
    else
    {
        dprintf (2, ("bgc loh: [%Ix-[%Ix(%Id)", alloc_start, alloc_start+size, size));
    }

#ifdef VERIFY_HEAP
    // Since we fill free objects with 0xcc when verifying the heap,
    // we need to make sure we clear those bytes.
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        if (size_to_clear < saved_size_to_clear)
        {
            size_to_clear = saved_size_to_clear;
        }
    }
#endif //VERIFY_HEAP

    total_alloc_bytes_loh += size - Align (min_obj_size, align_const);

    dprintf (SPINLOCK_LOG, ("[%d]Lmsl to clear large obj", heap_number));
    add_saved_spinlock_info (true, me_release, mt_clr_large_mem);
    leave_spin_lock (&more_space_lock_loh);

    ((void**) alloc_start)[-1] = 0;     //clear the sync block
    if (!(flags & GC_ALLOC_ZEROING_OPTIONAL))
    {
        memclr(alloc_start + size_to_skip, size_to_clear);
    }

    bgc_alloc_lock->loh_alloc_set (alloc_start);

    acontext->alloc_ptr = alloc_start;
    acontext->alloc_limit = (alloc_start + size - Align (min_obj_size, align_const));

    // need to clear the rest of the object before we hand it out.
    clear_unused_array(alloc_start, size);
}
#endif //BACKGROUND_GC

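// LOH version of a_fit_free_list_p: tries to satisfy a large object allocation from the LOH
// free list. Under FEATURE_LOH_COMPACTION each fit also has to leave room for the LOH padding
// object, which is carved off the front of the free item. When the BGC alloc lock hands back a
// valid cookie (a BGC needs to track this allocation) the clearing goes through
// bgc_loh_alloc_clr; otherwise adjust_limit_clr is used as on SOH.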
BOOL gc_heap::a_fit_free_list_large_p (size_t size,
                                       alloc_context* acontext,
                                       uint32_t flags,
                                       int align_const)
{
    BOOL can_fit = FALSE;
    int gen_number = max_generation + 1;
    generation* gen = generation_of (gen_number);
    allocator* loh_allocator = generation_allocator (gen);

#ifdef FEATURE_LOH_COMPACTION
    size_t loh_pad = Align (loh_padding_obj_size, align_const);
#endif //FEATURE_LOH_COMPACTION

#ifdef BACKGROUND_GC
    int cookie = -1;
#endif //BACKGROUND_GC
    size_t sz_list = loh_allocator->first_bucket_size();
    for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
    {
        if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
        {
            uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
            uint8_t* prev_free_item = 0;
            while (free_list != 0)
            {
                dprintf (3, ("considering free list %Ix", (size_t)free_list));

                size_t free_list_size = unused_array_size(free_list);

#ifdef FEATURE_LOH_COMPACTION
                if ((size + loh_pad) <= free_list_size)
#else
                if (((size + Align (min_obj_size, align_const)) <= free_list_size)||
                    (size == free_list_size))
#endif //FEATURE_LOH_COMPACTION
                {
#ifdef BACKGROUND_GC
                    cookie = bgc_alloc_lock->loh_alloc_set (free_list);
                    bgc_track_loh_alloc();
#endif //BACKGROUND_GC

                    //unlink the free_item
                    loh_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);

                    // Subtract min obj size because limit_from_size adds it. Not needed for LOH.
                    size_t limit = limit_from_size (size - Align(min_obj_size, align_const), flags, free_list_size,
                                                    gen_number, align_const);

#ifdef FEATURE_LOH_COMPACTION
                    make_unused_array (free_list, loh_pad);
                    limit -= loh_pad;
                    free_list += loh_pad;
                    free_list_size -= loh_pad;
#endif //FEATURE_LOH_COMPACTION

                    uint8_t*  remain = (free_list + limit);
                    size_t remain_size = (free_list_size - limit);
                    if (remain_size != 0)
                    {
                        assert (remain_size >= Align (min_obj_size, align_const));
                        make_unused_array (remain, remain_size);
                    }
                    if (remain_size >= Align(min_free_list, align_const))
                    {
                        loh_thread_gap_front (remain, remain_size, gen);
                        assert (remain_size >= Align (min_obj_size, align_const));
                    }
                    else
                    {
                        generation_free_obj_space (gen) += remain_size;
                    }
                    generation_free_list_space (gen) -= free_list_size;
                    dprintf (3, ("found fit on loh at %Ix", free_list));
#ifdef BACKGROUND_GC
                    if (cookie != -1)
                    {
                        bgc_loh_alloc_clr (free_list, limit, acontext, flags, align_const, cookie, FALSE, 0);
                    }
                    else
#endif //BACKGROUND_GC
                    {
                        adjust_limit_clr (free_list, limit, size, acontext, flags, 0, align_const, gen_number);
                    }

                    //fix the limit to compensate for adjust_limit_clr making it too short
                    acontext->alloc_limit += Align (min_obj_size, align_const);
                    can_fit = TRUE;
                    goto exit;
                }
                prev_free_item = free_list;
                free_list = free_list_slot (free_list);
            }
        }
        sz_list = sz_list * 2;
    }
exit:
    return can_fit;
}

#ifdef _MSC_VER
#pragma warning(default:4706)
#endif // _MSC_VER

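// Tries to fit an allocation at the end of the given segment. We first try against the
// committed end of the segment; failing that we try against the reserved end and attempt to
// commit more via grow_heap_segment. *commit_failed_p is set when the commit fails for a
// reason other than hitting the hard limit. For gen0 the pointer being advanced is
// alloc_allocated; for other generations it is the segment's allocated.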
BOOL gc_heap::a_fit_segment_end_p (int gen_number,
                                   heap_segment* seg,
                                   size_t size,
                                   alloc_context* acontext,
                                   uint32_t flags,
                                   int align_const,
                                   BOOL* commit_failed_p)
{
    *commit_failed_p = FALSE;
    size_t limit = 0;
    bool hard_limit_short_seg_end_p = false;
#ifdef BACKGROUND_GC
    int cookie = -1;
#endif //BACKGROUND_GC

    uint8_t*& allocated = ((gen_number == 0) ?
                                    alloc_allocated :
                                    heap_segment_allocated(seg));

    size_t pad = Align (min_obj_size, align_const);

#ifdef FEATURE_LOH_COMPACTION
    size_t loh_pad = Align (loh_padding_obj_size, align_const);
    if (gen_number == (max_generation + 1))
    {
        pad += loh_pad;
    }
#endif //FEATURE_LOH_COMPACTION

    uint8_t* end = heap_segment_committed (seg) - pad;

    if (a_size_fit_p (size, allocated, end, align_const))
    {
        limit = limit_from_size (size,
                                 flags,
                                 (end - allocated),
                                 gen_number, align_const);
        goto found_fit;
    }

    end = heap_segment_reserved (seg) - pad;

    if (a_size_fit_p (size, allocated, end, align_const))
    {
        limit = limit_from_size (size,
                                 flags,
                                 (end - allocated),
                                 gen_number, align_const);

        if (grow_heap_segment (seg, (allocated + limit), &hard_limit_short_seg_end_p))
        {
            goto found_fit;
        }
        else
        {
            if (!hard_limit_short_seg_end_p)
            {
                dprintf (2, ("can't grow segment, doing a full gc"));
                *commit_failed_p = TRUE;
            }
            else
            {
                assert (heap_hard_limit);
            }
        }
    }

    goto found_no_fit;

found_fit:

#ifdef BACKGROUND_GC
    if (gen_number != 0)
    {
        cookie = bgc_alloc_lock->loh_alloc_set (allocated);
        bgc_track_loh_alloc();
    }
#endif //BACKGROUND_GC

#ifdef FEATURE_LOH_COMPACTION
    if (gen_number == (max_generation + 1))
    {
        make_unused_array (allocated, loh_pad);
        allocated += loh_pad;
        limit -= loh_pad;
    }
#endif //FEATURE_LOH_COMPACTION

#if defined (VERIFY_HEAP) && defined (_DEBUG)
    // We are responsible for cleaning the syncblock and we will do it later,
    // as a part of the cleanup routine and when not holding the heap lock.
    // However, once we move "allocated" forward, if another thread initiates verification of
    // the previous object, it may consider the syncblock in the "next" object eligible for validation.
    // (see also: object.cpp/Object::ValidateInner)
    // Make sure it will see a cleaned up state to prevent triggering occasional verification failures.
    // And make sure the write happens before updating "allocated".
    VolatileStore(((void**)allocated - 1), (void*)0);     //clear the sync block
#endif //VERIFY_HEAP && _DEBUG

    uint8_t* old_alloc;
    old_alloc = allocated;
    dprintf (3, ("found fit at end of seg: %Ix", old_alloc));

#ifdef BACKGROUND_GC
    if (cookie != -1)
    {
        allocated += limit;
        bgc_loh_alloc_clr (old_alloc, limit, acontext, flags, align_const, cookie, TRUE, seg);
    }
    else
#endif //BACKGROUND_GC
    {
        // In a contiguous AC case with GC_ALLOC_ZEROING_OPTIONAL, deduct unspent space from the limit to clear only what is necessary.
        if ((flags & GC_ALLOC_ZEROING_OPTIONAL) &&
            ((allocated == acontext->alloc_limit) || (allocated == (acontext->alloc_limit + Align (min_obj_size, align_const)))))
        {
            assert(gen_number == 0);
            assert(allocated > acontext->alloc_ptr);

            limit -= (allocated - acontext->alloc_ptr);
            // add space for an AC continuity divider
            limit += Align(min_obj_size, align_const);
        }

        allocated += limit;
        adjust_limit_clr (old_alloc, limit, size, acontext, flags, seg, align_const, gen_number);
    }

    return TRUE;

found_no_fit:

    return FALSE;
}

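// Walks the LOH segments trying to fit the allocation at the end of one of them, skipping
// segments a BGC has flagged for deletion. Stops early with oom_cant_commit if a commit
// failure is why a segment end could not be used.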
BOOL gc_heap::loh_a_fit_segment_end_p (int gen_number,
                                       size_t size,
                                       alloc_context* acontext,
                                       uint32_t flags,
                                       int align_const,
                                       BOOL* commit_failed_p,
                                       oom_reason* oom_r)
{
    *commit_failed_p = FALSE;
    heap_segment* seg = generation_allocation_segment (generation_of (gen_number));
    BOOL can_allocate_p = FALSE;

    while (seg)
    {
#ifdef BACKGROUND_GC
        if (seg->flags & heap_segment_flags_loh_delete)
        {
            dprintf (3, ("h%d skipping seg %Ix to be deleted", heap_number, (size_t)seg));
        }
        else
#endif //BACKGROUND_GC
        {
            if (a_fit_segment_end_p (gen_number, seg, (size - Align (min_obj_size, align_const)),
                                        acontext, flags, align_const, commit_failed_p))
            {
                acontext->alloc_limit += Align (min_obj_size, align_const);
                can_allocate_p = TRUE;
                break;
            }

            if (*commit_failed_p)
            {
                *oom_r = oom_cant_commit;
                break;
            }
        }

        seg = heap_segment_next_rw (seg);
    }

    return can_allocate_p;
}

#ifdef BACKGROUND_GC
inline
void gc_heap::wait_for_background (alloc_wait_reason awr, bool loh_p)
{
    GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;

    dprintf (2, ("BGC is already in progress, waiting for it to finish"));
    add_saved_spinlock_info (loh_p, me_release, mt_wait_bgc);
    leave_spin_lock (msl);
    background_gc_wait (awr);
    enter_spin_lock (msl);
    add_saved_spinlock_info (loh_p, me_acquire, mt_wait_bgc);
}

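// If a BGC is running and the memory load is already at or above the high memory threshold,
// wait for the BGC to finish before allocating more.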
void gc_heap::wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p)
{
    if (recursive_gc_sync::background_running_p())
    {
        uint32_t memory_load;
        get_memory_info (&memory_load);
        if (memory_load >= m_high_memory_load_th)
        {
            dprintf (GTC_LOG, ("high mem - wait for BGC to finish, wait reason: %d", awr));
            wait_for_background (awr, loh_p);
        }
    }
}

#endif //BACKGROUND_GC

// We request to trigger an ephemeral GC but we may get a full compacting GC.
// return TRUE if that's the case.
BOOL gc_heap::trigger_ephemeral_gc (gc_reason gr)
{
#ifdef BACKGROUND_GC
    wait_for_bgc_high_memory (awr_loh_oos_bgc, false);
#endif //BACKGROUND_GC

    BOOL did_full_compact_gc = FALSE;

    dprintf (1, ("h%d triggering a gen1 GC", heap_number));
    size_t last_full_compact_gc_count = get_full_compact_gc_count();
    vm_heap->GarbageCollectGeneration(max_generation - 1, gr);

#ifdef MULTIPLE_HEAPS
    enter_spin_lock (&more_space_lock_soh);
    add_saved_spinlock_info (false, me_acquire, mt_t_eph_gc);
#endif //MULTIPLE_HEAPS

    size_t current_full_compact_gc_count = get_full_compact_gc_count();

    if (current_full_compact_gc_count > last_full_compact_gc_count)
    {
        dprintf (2, ("attempted to trigger an ephemeral GC and got a full compacting GC"));
        did_full_compact_gc = TRUE;
    }

    return did_full_compact_gc;
}

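// Tries to satisfy a SOH request without triggering a GC: first from the free list, then at
// the end of the ephemeral segment. If the caller passes short_seg_end_p we report whether the
// ephemeral segment is short on end-of-seg space and skip the segment-end attempt when it is.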
BOOL gc_heap::soh_try_fit (int gen_number,
                           size_t size,
                           alloc_context* acontext,
                           uint32_t flags,
                           int align_const,
                           BOOL* commit_failed_p,
                           BOOL* short_seg_end_p)
{
    BOOL can_allocate = TRUE;
    if (short_seg_end_p)
    {
        *short_seg_end_p = FALSE;
    }

    can_allocate = a_fit_free_list_p (gen_number, size, acontext, flags, align_const);
    if (!can_allocate)
    {
        if (short_seg_end_p)
        {
            *short_seg_end_p = short_on_end_of_seg (gen_number, ephemeral_heap_segment, align_const);
        }
        // If the caller doesn't care, we always try to fit at the end of seg;
        // otherwise we would only try if we are actually not short at end of seg.
        if (!short_seg_end_p || !(*short_seg_end_p))
        {
            can_allocate = a_fit_segment_end_p (gen_number, ephemeral_heap_segment, size,
                                                acontext, flags, align_const, commit_failed_p);
        }
    }

    return can_allocate;
}

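// State machine that services a SOH allocation once the fast paths have failed. Starting from
// a_state_start we try to fit in existing space; on failure we escalate to an ephemeral GC, a
// second ephemeral GC, waiting for an in-progress BGC, or a full compacting GC (commit
// failures go straight to the full compacting GC) until we either can allocate or have to
// report OOM. When a BGC is running and we are not using multiple heaps, we also periodically
// sleep between allocations to let the background GC make progress.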
allocation_state gc_heap::allocate_small (int gen_number,
                                          size_t size,
                                          alloc_context* acontext,
                                          uint32_t flags,
                                          int align_const)
{
#if defined (BACKGROUND_GC) && !defined (MULTIPLE_HEAPS)
    if (recursive_gc_sync::background_running_p())
    {
        background_soh_alloc_count++;
        if ((background_soh_alloc_count % bgc_alloc_spin_count) == 0)
        {
            add_saved_spinlock_info (false, me_release, mt_alloc_small);
            leave_spin_lock (&more_space_lock_soh);
            bool cooperative_mode = enable_preemptive();
            GCToOSInterface::Sleep (bgc_alloc_spin);
            disable_preemptive (cooperative_mode);
            enter_spin_lock (&more_space_lock_soh);
            add_saved_spinlock_info (false, me_acquire, mt_alloc_small);
        }
        else
        {
            //GCToOSInterface::YieldThread (0);
        }
    }
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS

    gc_reason gr = reason_oos_soh;
    oom_reason oom_r = oom_no_failure;

    // No variable values should be "carried over" from one state to the other.
    // That's why there are local variables for each state.

    allocation_state soh_alloc_state = a_state_start;

    // If we can get a new seg it means allocation will succeed.
    while (1)
    {
        dprintf (3, ("[h%d]soh state is %s", heap_number, allocation_state_str[soh_alloc_state]));

        switch (soh_alloc_state)
        {
            case a_state_can_allocate:
            case a_state_cant_allocate:
            {
                goto exit;
            }
            case a_state_start:
            {
                soh_alloc_state = a_state_try_fit;
                break;
            }
            case a_state_try_fit:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;

                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p,
                                                  NULL);
                soh_alloc_state = (can_use_existing_p ?
                                        a_state_can_allocate :
                                        (commit_failed_p ?
                                            a_state_trigger_full_compact_gc :
                                            a_state_trigger_ephemeral_gc));
                break;
            }
            case a_state_try_fit_after_bgc:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;
                BOOL short_seg_end_p = FALSE;

                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p,
                                                  &short_seg_end_p);
                soh_alloc_state = (can_use_existing_p ?
                                        a_state_can_allocate :
                                        (short_seg_end_p ?
                                            a_state_trigger_2nd_ephemeral_gc :
                                            a_state_trigger_full_compact_gc));
                break;
            }
            case a_state_try_fit_after_cg:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;
                BOOL short_seg_end_p = FALSE;

                can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p,
                                                  &short_seg_end_p);

                if (can_use_existing_p)
                {
                    soh_alloc_state = a_state_can_allocate;
                }
#ifdef MULTIPLE_HEAPS
                else if (gen0_allocated_after_gc_p)
                {
                    // some other threads already grabbed the more space lock and allocated
                    // so we should attempt an ephemeral GC again.
                    soh_alloc_state = a_state_trigger_ephemeral_gc;
                }
#endif //MULTIPLE_HEAPS
                else if (short_seg_end_p)
                {
                    soh_alloc_state = a_state_cant_allocate;
                    oom_r = oom_budget;
                }
                else
                {
                    assert (commit_failed_p);
                    soh_alloc_state = a_state_cant_allocate;
                    oom_r = oom_cant_commit;
                }
                break;
            }
            case a_state_check_and_wait_for_bgc:
            {
                BOOL bgc_in_progress_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                bgc_in_progress_p = check_and_wait_for_bgc (awr_gen0_oos_bgc, &did_full_compacting_gc, false);
                soh_alloc_state = (did_full_compacting_gc ?
                                        a_state_try_fit_after_cg :
                                        a_state_try_fit_after_bgc);
                break;
            }
            case a_state_trigger_ephemeral_gc:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;
                BOOL short_seg_end_p = FALSE;
                BOOL bgc_in_progress_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                did_full_compacting_gc = trigger_ephemeral_gc (gr);
                if (did_full_compacting_gc)
                {
                    soh_alloc_state = a_state_try_fit_after_cg;
                }
                else
                {
                    can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                      align_const, &commit_failed_p,
                                                      &short_seg_end_p);
#ifdef BACKGROUND_GC
                    bgc_in_progress_p = recursive_gc_sync::background_running_p();
#endif //BACKGROUND_GC

                    if (can_use_existing_p)
                    {
                        soh_alloc_state = a_state_can_allocate;
                    }
                    else
                    {
                        if (short_seg_end_p)
                        {
                            if (should_expand_in_full_gc)
                            {
                                dprintf (2, ("gen1 GC wanted to expand!"));
                                soh_alloc_state = a_state_trigger_full_compact_gc;
                            }
                            else
                            {
                                soh_alloc_state = (bgc_in_progress_p ?
                                                        a_state_check_and_wait_for_bgc :
                                                        a_state_trigger_full_compact_gc);
                            }
                        }
                        else if (commit_failed_p)
                        {
                            soh_alloc_state = a_state_trigger_full_compact_gc;
                        }
                        else
                        {
#ifdef MULTIPLE_HEAPS
                            // some other threads already grabbed the more space lock and allocated
                            // so we should attempt an ephemeral GC again.
                            assert (gen0_allocated_after_gc_p);
                            soh_alloc_state = a_state_trigger_ephemeral_gc;
#else //MULTIPLE_HEAPS
                            assert (!"shouldn't get here");
#endif //MULTIPLE_HEAPS
                        }
                    }
                }
                break;
            }
            case a_state_trigger_2nd_ephemeral_gc:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;
                BOOL short_seg_end_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;


                did_full_compacting_gc = trigger_ephemeral_gc (gr);

                if (did_full_compacting_gc)
                {
                    soh_alloc_state = a_state_try_fit_after_cg;
                }
                else
                {
                    can_use_existing_p = soh_try_fit (gen_number, size, acontext, flags,
                                                      align_const, &commit_failed_p,
                                                      &short_seg_end_p);
                    if (short_seg_end_p || commit_failed_p)
                    {
                        soh_alloc_state = a_state_trigger_full_compact_gc;
                    }
                    else
                    {
                        assert (can_use_existing_p);
                        soh_alloc_state = a_state_can_allocate;
                    }
                }
                break;
            }
            case a_state_trigger_full_compact_gc:
            {
                if (fgn_maxgen_percent)
                {
                    dprintf (2, ("FGN: SOH doing last GC before we throw OOM"));
                    send_full_gc_notification (max_generation, FALSE);
                }

                BOOL got_full_compacting_gc = FALSE;

                got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, false);
                soh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
                break;
            }
            default:
            {
                assert (!"Invalid state!");
                break;
            }
        }
    }

exit:
    if (soh_alloc_state == a_state_cant_allocate)
    {
        assert (oom_r != oom_no_failure);
        handle_oom (heap_number,
                    oom_r,
                    size,
                    heap_segment_allocated (ephemeral_heap_segment),
                    heap_segment_reserved (ephemeral_heap_segment));

        add_saved_spinlock_info (false, me_release, mt_alloc_small_cant);
        leave_spin_lock (&more_space_lock_soh);
    }

    assert ((soh_alloc_state == a_state_can_allocate) ||
            (soh_alloc_state == a_state_cant_allocate) ||
            (soh_alloc_state == a_state_retry_allocate));

    return soh_alloc_state;
}

#ifdef BACKGROUND_GC
inline
void gc_heap::bgc_track_loh_alloc()
{
    if (current_c_gc_state == c_gc_state_planning)
    {
        Interlocked::Increment (&loh_alloc_thread_count);
        dprintf (3, ("h%d: inc lc: %d", heap_number, (int32_t)loh_alloc_thread_count));
    }
}

inline
void gc_heap::bgc_untrack_loh_alloc()
{
    if (current_c_gc_state == c_gc_state_planning)
    {
        Interlocked::Decrement (&loh_alloc_thread_count);
        dprintf (3, ("h%d: dec lc: %d", heap_number, (int32_t)loh_alloc_thread_count));
    }
}

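// Decides whether a LOH allocation should proceed while a BGC is running. If the LOH is still
// small (current size less than 10x the LOH min gen size) we always allow it. Otherwise we
// refuse (and the caller waits for the BGC) when bgc_begin_loh_size was already at least twice
// end_loh_size, or when the LOH has grown by at least its starting size during this BGC; if
// neither holds we allow the allocation but set bgc_alloc_spin_loh proportionally to how much
// the LOH has grown, so allocating threads yield more as growth increases.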
BOOL gc_heap::bgc_loh_should_allocate()
{
    size_t min_gc_size = dd_min_size (dynamic_data_of (max_generation + 1));

    if ((bgc_begin_loh_size + bgc_loh_size_increased) < (min_gc_size * 10))
    {
        return TRUE;
    }

    if (((bgc_begin_loh_size / end_loh_size) >= 2) || (bgc_loh_size_increased >= bgc_begin_loh_size))
    {
        if ((bgc_begin_loh_size / end_loh_size) > 2)
        {
            dprintf (3, ("alloc-ed too much before bgc started"));
        }
        else
        {
            dprintf (3, ("alloc-ed too much after bgc started"));
        }
        return FALSE;
    }
    else
    {
        bgc_alloc_spin_loh = (uint32_t)(((float)bgc_loh_size_increased / (float)bgc_begin_loh_size) * 10);
        return TRUE;
    }
}
#endif //BACKGROUND_GC

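// Computes the size of segment to acquire for a large object of the given size: the request
// plus room for two min-size objects and a page of overhead, rounded up to a multiple of
// align_size, but never less than the default LOH segment size. As an illustrative example
// only (actual min_loh_segment_size and OS_PAGE_SIZE depend on platform and configuration):
// with a 4MB min_loh_segment_size, 4KB pages and SEG_MAPPING_TABLE, a 10MB request becomes
// (10MB + 2*Align(min_obj_size) + 4KB + 4MB) / 4MB * 4MB = 12MB.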
size_t gc_heap::get_large_seg_size (size_t size)
{
    size_t default_seg_size = min_loh_segment_size;
#ifdef SEG_MAPPING_TABLE
    size_t align_size =  default_seg_size;
#else //SEG_MAPPING_TABLE
    size_t align_size =  default_seg_size / 2;
#endif //SEG_MAPPING_TABLE
    int align_const = get_alignment_constant (FALSE);
    size_t large_seg_size = align_on_page (
        max (default_seg_size,
            ((size + 2 * Align(min_obj_size, align_const) + OS_PAGE_SIZE +
            align_size) / align_size * align_size)));
    return large_seg_size;
}

BOOL gc_heap::loh_get_new_seg (generation* gen,
                               size_t size,
                               int align_const,
                               BOOL* did_full_compact_gc,
                               oom_reason* oom_r)
{
    UNREFERENCED_PARAMETER(gen);
    UNREFERENCED_PARAMETER(align_const);

    *did_full_compact_gc = FALSE;

    size_t seg_size = get_large_seg_size (size);

    heap_segment* new_seg = get_large_segment (seg_size, did_full_compact_gc);

    if (new_seg)
    {
        loh_alloc_since_cg += seg_size;
    }
    else
    {
        *oom_r = oom_loh;
    }

    return (new_seg != 0);
}

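// Returns TRUE if enough LOH allocation has happened since the last compacting GC (at least
// twice the segment size we'd need, either on this heap or summed across all heaps) that
// retrying a full compacting GC is likely to be productive.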
// PERF TODO: this is too aggressive; also, when a hard limit is in effect we should
// count the actual allocated bytes instead of only updating the count when
// we get a new seg.
BOOL gc_heap::retry_full_compact_gc (size_t size)
{
    size_t seg_size = get_large_seg_size (size);

    if (loh_alloc_since_cg >= (2 * (uint64_t)seg_size))
    {
        return TRUE;
    }

#ifdef MULTIPLE_HEAPS
    uint64_t total_alloc_size = 0;
    for (int i = 0; i < n_heaps; i++)
    {
        total_alloc_size += g_heaps[i]->loh_alloc_since_cg;
    }

    if (total_alloc_size >= (2 * (uint64_t)seg_size))
    {
        return TRUE;
    }
#endif //MULTIPLE_HEAPS

    return FALSE;
}

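// If a BGC is in progress, waits for it to finish and returns TRUE; *did_full_compact_gc
// reports whether a full compacting GC happened while we were waiting.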
BOOL gc_heap::check_and_wait_for_bgc (alloc_wait_reason awr,
                                      BOOL* did_full_compact_gc,
                                      bool loh_p)
{
    BOOL bgc_in_progress = FALSE;
    *did_full_compact_gc = FALSE;
#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        bgc_in_progress = TRUE;
        size_t last_full_compact_gc_count = get_full_compact_gc_count();
        wait_for_background (awr, loh_p);
        size_t current_full_compact_gc_count = get_full_compact_gc_count();
        if (current_full_compact_gc_count > last_full_compact_gc_count)
        {
            *did_full_compact_gc = TRUE;
        }
    }
#endif //BACKGROUND_GC

    return bgc_in_progress;
}

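// Tries to satisfy a LOH request without triggering a GC: first from the LOH free list, then
// at the end of an existing LOH segment. While a BGC is running we also record how much was
// carved out of segments (bgc_loh_size_increased) vs. taken from the free list
// (bgc_loh_allocated_in_free); the former feeds the bgc_loh_should_allocate heuristic.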
BOOL gc_heap::loh_try_fit (int gen_number,
                           size_t size,
                           alloc_context* acontext,
                           uint32_t flags,
                           int align_const,
                           BOOL* commit_failed_p,
                           oom_reason* oom_r)
{
    BOOL can_allocate = TRUE;

    if (!a_fit_free_list_large_p (size, acontext, flags, align_const))
    {
        can_allocate = loh_a_fit_segment_end_p (gen_number, size,
                                                acontext, flags, align_const,
                                                commit_failed_p, oom_r);

#ifdef BACKGROUND_GC
        if (can_allocate && recursive_gc_sync::background_running_p())
        {
            bgc_loh_size_increased += size;
        }
#endif //BACKGROUND_GC
    }
#ifdef BACKGROUND_GC
    else
    {
        if (recursive_gc_sync::background_running_p())
        {
            bgc_loh_allocated_in_free += size;
        }
    }
#endif //BACKGROUND_GC

    return can_allocate;
}

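// Triggers a full compacting GC on behalf of an allocation and returns TRUE if one actually
// happened. last_gc_before_oom is set first so the GC we trigger will indeed be compacting. We
// then wait for any in-progress BGC; if a full compacting GC happened while we waited there is
// nothing left to do. Otherwise we trigger a max_generation GC; if the elevation logic
// downgraded it and no full compacting GC occurred, we report oom_unproductive_full_gc so the
// caller fails the allocation.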
BOOL gc_heap::trigger_full_compact_gc (gc_reason gr,
                                       oom_reason* oom_r,
                                       bool loh_p)
{
    BOOL did_full_compact_gc = FALSE;

    size_t last_full_compact_gc_count = get_full_compact_gc_count();

    // Set this so the next GC will be a full compacting GC.
    if (!last_gc_before_oom)
    {
        last_gc_before_oom = TRUE;
    }

#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        wait_for_background (((gr == reason_oos_soh) ? awr_gen0_oos_bgc : awr_loh_oos_bgc), loh_p);
        dprintf (2, ("waited for BGC - done"));
    }
#endif //BACKGROUND_GC

    GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;
    size_t current_full_compact_gc_count = get_full_compact_gc_count();
    if (current_full_compact_gc_count > last_full_compact_gc_count)
    {
        dprintf (3, ("a full compacting GC triggered while waiting for BGC (%d->%d)", last_full_compact_gc_count, current_full_compact_gc_count));
        assert (current_full_compact_gc_count > last_full_compact_gc_count);
        did_full_compact_gc = TRUE;
        goto exit;
    }

    dprintf (3, ("h%d full GC", heap_number));

    trigger_gc_for_alloc (max_generation, gr, msl, loh_p, mt_t_full_gc);

    current_full_compact_gc_count = get_full_compact_gc_count();

    if (current_full_compact_gc_count == last_full_compact_gc_count)
    {
        dprintf (2, ("attempted to trigger a full compacting GC but didn't get it"));
        // We requested a full GC but didn't get one because of the elevation logic,
        // which means we should fail.
        *oom_r = oom_unproductive_full_gc;
    }
    else
    {
        dprintf (3, ("h%d: T full compacting GC (%d->%d)",
            heap_number,
            last_full_compact_gc_count,
            current_full_compact_gc_count));

        assert (current_full_compact_gc_count > last_full_compact_gc_count);
        did_full_compact_gc = TRUE;
    }

exit:
    return did_full_compact_gc;
}

#ifdef RECORD_LOH_STATE
void gc_heap::add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id)
{
    // When the state is can_allocate we have already released the more
    // space lock, so we don't log that state here since this code
    // is not thread safe.
    if (loh_state_to_save != a_state_can_allocate)
    {
        last_loh_states[loh_state_index].alloc_state = loh_state_to_save;
        last_loh_states[loh_state_index].thread_id = thread_id;
        loh_state_index++;

        if (loh_state_index == max_saved_loh_states)
        {
            loh_state_index = 0;
        }

        assert (loh_state_index < max_saved_loh_states);
    }
}
#endif //RECORD_LOH_STATE

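// With a hard limit and multiple heaps, a LOH allocation that failed on this heap may still
// succeed on another one. Returns true when the total committed plus the requested size still
// leaves some slack (at least commit_min_th or the LOH min gen size) below the hard limit.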
bool gc_heap::should_retry_other_heap (size_t size)
{
#ifdef MULTIPLE_HEAPS
    if (heap_hard_limit)
    {
        size_t total_heap_committed_recorded =
            current_total_committed - current_total_committed_bookkeeping;
        size_t min_size = dd_min_size (g_heaps[0]->dynamic_data_of (max_generation + 1));
        size_t slack_space = max (commit_min_th, min_size);
        bool retry_p = ((total_heap_committed_recorded + size) < (heap_hard_limit - slack_space));
        dprintf (1, ("%Id - %Id - total committed %Id - size %Id = %Id, %s",
            heap_hard_limit, slack_space, total_heap_committed_recorded, size,
            (heap_hard_limit - slack_space - total_heap_committed_recorded - size),
            (retry_p ? "retry" : "no retry")));
        return retry_p;
    }
    else
#endif //MULTIPLE_HEAPS
    {
        return false;
    }
}

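// LOH counterpart of allocate_small. On top of the try-fit/trigger-GC states it can also
// acquire a brand new LOH segment, and under a hard limit a failed allocation can come back as
// a_state_retry_allocate so the caller tries another heap instead of reporting OOM right away.
// While a BGC is running we throttle LOH allocations (or outright wait for the BGC) based on
// bgc_loh_should_allocate.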
allocation_state gc_heap::allocate_large (int gen_number,
                                          size_t size,
                                          alloc_context* acontext,
                                          uint32_t flags,
                                          int align_const)
{
#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
#ifdef BGC_SERVO_TUNING
        bool planning_p = (current_c_gc_state == c_gc_state_planning);
#endif //BGC_SERVO_TUNING

        background_loh_alloc_count++;
        //if ((background_loh_alloc_count % bgc_alloc_spin_count_loh) == 0)
        {
#ifdef BGC_SERVO_TUNING
            if (planning_p)
            {
                loh_a_bgc_planning += size;
            }
            else
            {
                loh_a_bgc_marking += size;
            }
#endif //BGC_SERVO_TUNING

            if (bgc_loh_should_allocate())
            {
                if (!bgc_alloc_spin_loh)
                {
                    add_saved_spinlock_info (true, me_release, mt_alloc_large);
                    leave_spin_lock (&more_space_lock_loh);
                    bool cooperative_mode = enable_preemptive();
                    GCToOSInterface::YieldThread (bgc_alloc_spin_loh);
                    disable_preemptive (cooperative_mode);
                    enter_spin_lock (&more_space_lock_loh);
                    add_saved_spinlock_info (true, me_acquire, mt_alloc_large);
                    dprintf (SPINLOCK_LOG, ("[%d]spin Emsl loh", heap_number));
                }
            }
            else
            {
                wait_for_background (awr_loh_alloc_during_bgc, true);
            }
        }
    }
#ifdef BGC_SERVO_TUNING
    else
    {
        loh_a_no_bgc += size;
    }
#endif //BGC_SERVO_TUNING
#endif //BACKGROUND_GC

    gc_reason gr = reason_oos_loh;
    generation* gen = generation_of (gen_number);
    oom_reason oom_r = oom_no_failure;
    size_t current_full_compact_gc_count = 0;

    // No variable values should be "carried over" from one state to the other.
    // That's why there are local variables for each state.
    allocation_state loh_alloc_state = a_state_start;
#ifdef RECORD_LOH_STATE
    EEThreadId current_thread_id;
    current_thread_id.SetToCurrentThread();
#endif //RECORD_LOH_STATE

    // If we can get a new seg it means allocation will succeed.
    while (1)
    {
        dprintf (3, ("[h%d]loh state is %s", heap_number, allocation_state_str[loh_alloc_state]));

#ifdef RECORD_LOH_STATE
        add_saved_loh_state (loh_alloc_state, current_thread_id);
#endif //RECORD_LOH_STATE
        switch (loh_alloc_state)
        {
            case a_state_can_allocate:
            case a_state_cant_allocate:
            {
                goto exit;
            }
            case a_state_start:
            {
                loh_alloc_state = a_state_try_fit;
                break;
            }
            case a_state_try_fit:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;

                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p, &oom_r);
                loh_alloc_state = (can_use_existing_p ?
                                        a_state_can_allocate :
                                        (commit_failed_p ?
                                            a_state_trigger_full_compact_gc :
                                            a_state_acquire_seg));
                assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
                break;
            }
            case a_state_try_fit_new_seg:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;

                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p, &oom_r);
                // Even after we got a new seg it doesn't necessarily mean we can allocate;
                // another LOH allocating thread could have beaten us to acquiring the msl, so
                // we need to try again.
                loh_alloc_state = (can_use_existing_p ? a_state_can_allocate : a_state_try_fit);
                assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
                break;
            }
            case a_state_try_fit_after_cg:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;

                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p, &oom_r);
                // If we failed to commit, we bail right away 'cause we already did a
                // full compacting GC.
                loh_alloc_state = (can_use_existing_p ?
                                        a_state_can_allocate :
                                        (commit_failed_p ?
                                            a_state_cant_allocate :
                                            a_state_acquire_seg_after_cg));
                assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
                break;
            }
            case a_state_try_fit_after_bgc:
            {
                BOOL commit_failed_p = FALSE;
                BOOL can_use_existing_p = FALSE;

                can_use_existing_p = loh_try_fit (gen_number, size, acontext, flags,
                                                  align_const, &commit_failed_p, &oom_r);
                loh_alloc_state = (can_use_existing_p ?
                                        a_state_can_allocate :
                                        (commit_failed_p ?
                                            a_state_trigger_full_compact_gc :
                                            a_state_acquire_seg_after_bgc));
                assert ((loh_alloc_state == a_state_can_allocate) == (acontext->alloc_ptr != 0));
                break;
            }
            case a_state_acquire_seg:
            {
                BOOL can_get_new_seg_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                current_full_compact_gc_count = get_full_compact_gc_count();

                can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
                loh_alloc_state = (can_get_new_seg_p ?
                                        a_state_try_fit_new_seg :
                                        (did_full_compacting_gc ?
                                            a_state_check_retry_seg :
                                            a_state_check_and_wait_for_bgc));
                break;
            }
            case a_state_acquire_seg_after_cg:
            {
                BOOL can_get_new_seg_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                current_full_compact_gc_count = get_full_compact_gc_count();

                can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
                // Since we release the msl before we try to allocate a seg, other
                // threads could have allocated a bunch of segments before us so
                // we might need to retry.
                loh_alloc_state = (can_get_new_seg_p ?
                                        a_state_try_fit_after_cg :
                                        a_state_check_retry_seg);
                break;
            }
            case a_state_acquire_seg_after_bgc:
            {
                BOOL can_get_new_seg_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                current_full_compact_gc_count = get_full_compact_gc_count();

                can_get_new_seg_p = loh_get_new_seg (gen, size, align_const, &did_full_compacting_gc, &oom_r);
                loh_alloc_state = (can_get_new_seg_p ?
                                        a_state_try_fit_new_seg :
                                        (did_full_compacting_gc ?
                                            a_state_check_retry_seg :
                                            a_state_trigger_full_compact_gc));
                assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
                break;
            }
            case a_state_check_and_wait_for_bgc:
            {
                BOOL bgc_in_progress_p = FALSE;
                BOOL did_full_compacting_gc = FALSE;

                bgc_in_progress_p = check_and_wait_for_bgc (awr_loh_oos_bgc, &did_full_compacting_gc, true);
                loh_alloc_state = (!bgc_in_progress_p ?
                                        a_state_trigger_full_compact_gc :
                                        (did_full_compacting_gc ?
                                            a_state_try_fit_after_cg :
                                            a_state_try_fit_after_bgc));
                break;
            }
            case a_state_trigger_full_compact_gc:
            {
                if (fgn_maxgen_percent)
                {
                    dprintf (2, ("FGN: LOH doing last GC before we throw OOM"));
                    send_full_gc_notification (max_generation, FALSE);
                }

                BOOL got_full_compacting_gc = FALSE;

                got_full_compacting_gc = trigger_full_compact_gc (gr, &oom_r, true);
                loh_alloc_state = (got_full_compacting_gc ? a_state_try_fit_after_cg : a_state_cant_allocate);
                assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
                break;
            }
            case a_state_check_retry_seg:
            {
                BOOL should_retry_gc = retry_full_compact_gc (size);
                BOOL should_retry_get_seg = FALSE;
                if (!should_retry_gc)
                {
                    size_t last_full_compact_gc_count = current_full_compact_gc_count;
                    current_full_compact_gc_count = get_full_compact_gc_count();
                    if (current_full_compact_gc_count > last_full_compact_gc_count)
                    {
                        should_retry_get_seg = TRUE;
                    }
                }

                loh_alloc_state = (should_retry_gc ?
                                        a_state_trigger_full_compact_gc :
                                        (should_retry_get_seg ?
                                            a_state_try_fit_after_cg :
                                            a_state_cant_allocate));
                assert ((loh_alloc_state != a_state_cant_allocate) || (oom_r != oom_no_failure));
                break;
            }
            default:
            {
                assert (!"Invalid state!");
                break;
            }
        }
    }

exit:
    if (loh_alloc_state == a_state_cant_allocate)
    {
        assert (oom_r != oom_no_failure);

        if ((oom_r != oom_cant_commit) && should_retry_other_heap (size))
        {
            loh_alloc_state = a_state_retry_allocate;
        }
        else
        {
            handle_oom (heap_number,
                        oom_r,
                        size,
                        0,
                        0);
        }
        add_saved_spinlock_info (true, me_release, mt_alloc_large_cant);
        leave_spin_lock (&more_space_lock_loh);
    }

    assert ((loh_alloc_state == a_state_can_allocate) ||
            (loh_alloc_state == a_state_cant_allocate) ||
            (loh_alloc_state == a_state_retry_allocate));
    return loh_alloc_state;
}

// BGC's final mark phase will acquire the msl, so release it here and re-acquire.
void gc_heap::trigger_gc_for_alloc (int gen_number, gc_reason gr,
                                    GCSpinLock* msl, bool loh_p,
                                    msl_take_state take_state)
{
#ifdef BACKGROUND_GC
    if (loh_p)
    {
        add_saved_spinlock_info (loh_p, me_release, take_state);
        leave_spin_lock (msl);
    }
#endif //BACKGROUND_GC

    vm_heap->GarbageCollectGeneration (gen_number, gr);

#ifdef MULTIPLE_HEAPS
    if (!loh_p)
    {
        enter_spin_lock (msl);
        add_saved_spinlock_info (loh_p, me_acquire, take_state);
    }
#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
    if (loh_p)
    {
        enter_spin_lock (msl);
        add_saved_spinlock_info (loh_p, me_acquire, take_state);
    }
#endif //BACKGROUND_GC
}

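// Services an allocation request that could not be satisfied from the alloc context. If a GC
// has already started we just wait for it and ask the caller to retry. Otherwise we grab the
// appropriate more space lock (SOH or LOH), check for a pending full GC notification, may
// trigger a GC when the budget for this generation is exhausted, and then dispatch to
// allocate_small or allocate_large. On success we also accumulate the allocated amount towards
// the GCAllocationTick ETW event.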
allocation_state gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size,
                                    uint32_t flags, int gen_number)
{
    if (gc_heap::gc_started)
    {
        wait_for_gc_done();
        return a_state_retry_allocate;
    }

    bool loh_p = (gen_number > 0);
    GCSpinLock* msl = loh_p ? &more_space_lock_loh : &more_space_lock_soh;

#ifdef SYNCHRONIZATION_STATS
    int64_t msl_acquire_start = GCToOSInterface::QueryPerformanceCounter();
#endif //SYNCHRONIZATION_STATS
    enter_spin_lock (msl);
    add_saved_spinlock_info (loh_p, me_acquire, mt_try_alloc);
    dprintf (SPINLOCK_LOG, ("[%d]Emsl for alloc", heap_number));
#ifdef SYNCHRONIZATION_STATS
    int64_t msl_acquire = GCToOSInterface::QueryPerformanceCounter() - msl_acquire_start;
    total_msl_acquire += msl_acquire;
    num_msl_acquired++;
    if (msl_acquire > 200)
    {
        num_high_msl_acquire++;
    }
    else
    {
        num_low_msl_acquire++;
    }
#endif //SYNCHRONIZATION_STATS

    /*
    // We are commenting this out 'cause we don't see the point - we already
    // have checked gc_started when we were acquiring the msl - no need to check
    // again. This complicates the logic in bgc_suspend_EE 'cause that one would
    // need to release msl which causes all sorts of trouble.
    if (gc_heap::gc_started)
    {
#ifdef SYNCHRONIZATION_STATS
        good_suspension++;
#endif //SYNCHRONIZATION_STATS
        BOOL fStress = (g_pConfig->GetGCStressLevel() & GCConfig::GCSTRESS_TRANSITION) != 0;
        if (!fStress)
        {
            //Rendez vous early (MP scaling issue)
            //dprintf (1, ("[%d]waiting for gc", heap_number));
            wait_for_gc_done();
#ifdef MULTIPLE_HEAPS
            return -1;
#endif //MULTIPLE_HEAPS
        }
    }
    */

    dprintf (3, ("requested to allocate %d bytes on gen%d", size, gen_number));

    int align_const = get_alignment_constant (gen_number != (max_generation+1));

    if (fgn_maxgen_percent)
    {
        check_for_full_gc (gen_number, size);
    }

#ifdef BGC_SERVO_TUNING
    if ((gen_number != 0) && bgc_tuning::should_trigger_bgc_loh())
    {
        trigger_gc_for_alloc (max_generation, reason_bgc_tuning_loh, msl, loh_p, mt_try_servo_budget);
    }
    else
#endif //BGC_SERVO_TUNING
    {
        bool trigger_on_budget_loh_p =
#ifdef BGC_SERVO_TUNING
            !bgc_tuning::enable_fl_tuning;
#else
            true;
#endif //BGC_SERVO_TUNING

        bool check_budget_p = true;
        if (gen_number != 0)
        {
            check_budget_p = trigger_on_budget_loh_p;
        }

        if (check_budget_p && !(new_allocation_allowed (gen_number)))
        {
            if (fgn_maxgen_percent && (gen_number == 0))
            {
                // We only check gen0 every so often, so take this opportunity to check again.
                check_for_full_gc (gen_number, size);
            }

#ifdef BACKGROUND_GC
            wait_for_bgc_high_memory (awr_gen0_alloc, loh_p);
#endif //BACKGROUND_GC

#ifdef SYNCHRONIZATION_STATS
            bad_suspension++;
#endif //SYNCHRONIZATION_STATS
            dprintf (2, ("h%d running out of budget on gen%d, gc", heap_number, gen_number));

            if (!settings.concurrent || (gen_number == 0))
            {
                trigger_gc_for_alloc (0, ((gen_number == 0) ? reason_alloc_soh : reason_alloc_loh),
                                    msl, loh_p, mt_try_budget);
            }
        }
    }

    allocation_state can_allocate = ((gen_number == 0) ?
        allocate_small (gen_number, size, acontext, flags, align_const) :
        allocate_large (gen_number, size, acontext, flags, align_const));

    if (can_allocate == a_state_can_allocate)
    {
        size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr;
        int etw_allocation_index = ((gen_number == 0) ? 0 : 1);

        etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes;

        allocated_since_last_gc += alloc_context_bytes;

        if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick)
        {
#ifdef FEATURE_REDHAWK
            FIRE_EVENT(GCAllocationTick_V1, (uint32_t)etw_allocation_running_amount[etw_allocation_index],
                                            (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh);
#else

#if defined(FEATURE_EVENT_TRACE)
            // We are explicitly checking whether the event is enabled here.
            // Unfortunately some of the ETW macros do not check whether the ETW feature is enabled.
            // The ones that do are much less efficient.
            if (EVENT_ENABLED(GCAllocationTick_V3))
            {
                fire_etw_allocation_event (etw_allocation_running_amount[etw_allocation_index], gen_number, acontext->alloc_ptr);
            }

#endif //FEATURE_EVENT_TRACE
#endif //FEATURE_REDHAWK
            etw_allocation_running_amount[etw_allocation_index] = 0;
        }
    }

    return can_allocate;
}

#ifdef MULTIPLE_HEAPS
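// Balances allocation across heaps in server GC. For the first few allocations on a context we
// simply bind it to the home heap selected from the current processor; after that we
// periodically re-check and, if another heap has noticeably more gen0 budget remaining than
// the current alloc heap (beyond a delta derived from the remaining budget), we switch the
// context's alloc heap so allocation pressure evens out across heaps.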
void gc_heap::balance_heaps (alloc_context* acontext)
{
    if (acontext->alloc_count < 4)
    {
        if (acontext->alloc_count == 0)
        {
            int home_hp_num = heap_select::select_heap (acontext);
            acontext->set_home_heap (GCHeap::GetHeap (home_hp_num));
            gc_heap* hp = acontext->get_home_heap ()->pGenGCHeap;
            acontext->set_alloc_heap (acontext->get_home_heap ());
            hp->alloc_context_count++;

#ifdef HEAP_BALANCE_INSTRUMENTATION
            uint16_t ideal_proc_no = 0;
            GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no);

            uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber ();

            add_to_hb_numa (proc_no, ideal_proc_no,
                home_hp_num, false, true, false);

            dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPafter GC: 1st alloc on p%3d, h%d, ip: %d",
                proc_no, home_hp_num, ideal_proc_no));
#endif //HEAP_BALANCE_INSTRUMENTATION
        }
    }
    else
    {
        BOOL set_home_heap = FALSE;
        gc_heap* home_hp = NULL;
        int proc_hp_num = 0;

#ifdef HEAP_BALANCE_INSTRUMENTATION
        bool alloc_count_p = true;
        bool multiple_procs_p = false;
        bool set_ideal_p = false;
        uint32_t proc_no = GCToOSInterface::GetCurrentProcessorNumber ();
        uint32_t last_proc_no = proc_no;
#endif //HEAP_BALANCE_INSTRUMENTATION

        if (heap_select::can_find_heap_fast ())
        {
            assert (acontext->get_home_heap () != NULL);
            home_hp = acontext->get_home_heap ()->pGenGCHeap;
            proc_hp_num = heap_select::select_heap (acontext);

            if (acontext->get_home_heap () != GCHeap::GetHeap (proc_hp_num))
            {
#ifdef HEAP_BALANCE_INSTRUMENTATION
                alloc_count_p = false;
#endif //HEAP_BALANCE_INSTRUMENTATION
                set_home_heap = TRUE;
            }
            else if ((acontext->alloc_count & 15) == 0)
                set_home_heap = TRUE;

            if (set_home_heap)
            {
            }
        }
        else
        {
            if ((acontext->alloc_count & 3) == 0)
                set_home_heap = TRUE;
        }

        if (set_home_heap)
        {
            /*
            // Since we are balancing up to MAX_SUPPORTED_CPUS, no need for this.
            if (n_heaps > MAX_SUPPORTED_CPUS)
            {
                // on machines with many processors cache affinity is really king, so don't even try
                // to balance on these.
                acontext->home_heap = GCHeap::GetHeap( heap_select::select_heap(acontext));
                acontext->alloc_heap = acontext->home_heap;
            }
            else
            */
            {
                gc_heap* org_hp = acontext->get_alloc_heap ()->pGenGCHeap;
                int org_hp_num = org_hp->heap_number;
                int final_alloc_hp_num = org_hp_num;

                dynamic_data* dd = org_hp->dynamic_data_of (0);
                ptrdiff_t org_size = dd_new_allocation (dd);
                ptrdiff_t total_size = (ptrdiff_t)dd_desired_allocation (dd);

#ifdef HEAP_BALANCE_INSTRUMENTATION
                dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMP[p%3d] ph h%3d, hh: %3d, ah: %3d (%dmb-%dmb), ac: %5d(%s)",
                    proc_no, proc_hp_num, home_hp->heap_number,
                    org_hp_num, (total_size / 1024 / 1024), (org_size / 1024 / 1024),
                    acontext->alloc_count,
                    ((proc_hp_num == home_hp->heap_number) ? "AC" : "H")));
#endif //HEAP_BALANCE_INSTRUMENTATION

                int org_alloc_context_count;
                int max_alloc_context_count;
                gc_heap* max_hp;
                int max_hp_num = 0;
                ptrdiff_t max_size;
                size_t local_delta = max (((size_t)org_size >> 6), min_gen0_balance_delta);
                size_t delta = local_delta;

                if (((size_t)org_size + 2 * delta) >= (size_t)total_size)
                {
                    acontext->alloc_count++;
                    return;
                }

                int start, end, finish;
                heap_select::get_heap_range_for_heap (org_hp->heap_number, &start, &end);
                finish = start + n_heaps;

try_again:
                gc_heap* new_home_hp = 0;

                do
                {
                    max_hp = org_hp;
                    max_hp_num = org_hp_num;
                    max_size = org_size + delta;
#ifdef HEAP_BALANCE_INSTRUMENTATION
                    proc_no = GCToOSInterface::GetCurrentProcessorNumber ();
                    if (proc_no != last_proc_no)
                    {
                        dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSP: %d->%d", last_proc_no, proc_no));
                        multiple_procs_p = true;
                        last_proc_no = proc_no;
                    }

                    int current_hp_num = heap_select::proc_no_to_heap_no[proc_no];
                    acontext->set_home_heap (GCHeap::GetHeap (current_hp_num));
#else
                    acontext->set_home_heap (GCHeap::GetHeap (heap_select::select_heap (acontext)));
#endif //HEAP_BALANCE_INSTRUMENTATION
                    new_home_hp = acontext->get_home_heap ()->pGenGCHeap;
                    if (org_hp == new_home_hp)
                        max_size = max_size + delta;

                    org_alloc_context_count = org_hp->alloc_context_count;
                    max_alloc_context_count = org_alloc_context_count;
                    if (max_alloc_context_count > 1)
                        max_size /= max_alloc_context_count;

                    int actual_start = start;
                    int actual_end = (end - 1);

                    for (int i = start; i < end; i++)
                    {
                        gc_heap* hp = GCHeap::GetHeap (i % n_heaps)->pGenGCHeap;
                        dd = hp->dynamic_data_of (0);
                        ptrdiff_t size = dd_new_allocation (dd);

                        if (hp == new_home_hp)
                        {
                            size = size + delta;
                        }
                        int hp_alloc_context_count = hp->alloc_context_count;

                        if (hp_alloc_context_count > 0)
                        {
                            size /= (hp_alloc_context_count + 1);
                        }
                        if (size > max_size)
                        {
#ifdef HEAP_BALANCE_INSTRUMENTATION
                            dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPorg h%d(%dmb), m h%d(%dmb)",
                                org_hp_num, (max_size / 1024 / 1024),
                                hp->heap_number, (size / 1024 / 1024)));
#endif //HEAP_BALANCE_INSTRUMENTATION

                            max_hp = hp;
                            max_size = size;
                            max_hp_num = max_hp->heap_number;
                            max_alloc_context_count = hp_alloc_context_count;
                        }
                    }
                }
                while (org_alloc_context_count != org_hp->alloc_context_count ||
                    max_alloc_context_count != max_hp->alloc_context_count);

                if ((max_hp == org_hp) && (end < finish))
                {
                    start = end; end = finish;
                    delta = local_delta * 2; // Make it twice as hard to balance to remote nodes on NUMA.
                    goto try_again;
                }

#ifdef HEAP_BALANCE_INSTRUMENTATION
                uint16_t ideal_proc_no_before_set_ideal = 0;
                GCToOSInterface::GetCurrentThreadIdealProc (&ideal_proc_no_before_set_ideal);
#endif //HEAP_BALANCE_INSTRUMENTATION

                if (max_hp != org_hp)
                {
                    final_alloc_hp_num = max_hp->heap_number;

                    org_hp->alloc_context_count--;
                    max_hp->alloc_context_count++;

                    acontext->set_alloc_heap (GCHeap::GetHeap (final_alloc_hp_num));
                    if (!gc_thread_no_affinitize_p)
                    {
                        uint16_t src_proc_no = heap_select::find_proc_no_from_heap_no (org_hp->heap_number);
                        uint16_t dst_proc_no = heap_select::find_proc_no_from_heap_no (max_hp->heap_number);

                        dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSW! h%d(p%d)->h%d(p%d)",
                            org_hp_num, src_proc_no, final_alloc_hp_num, dst_proc_no));

#ifdef HEAP_BALANCE_INSTRUMENTATION
                        int current_proc_no_before_set_ideal = GCToOSInterface::GetCurrentProcessorNumber ();
                        if (current_proc_no_before_set_ideal != last_proc_no)
                        {
                            dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPSPa: %d->%d", last_proc_no, current_proc_no_before_set_ideal));
                            multiple_procs_p = true;
                        }
#endif //HEAP_BALANCE_INSTRUMENTATION

                        if (!GCToOSInterface::SetCurrentThreadIdealAffinity (src_proc_no, dst_proc_no))
                        {
                            dprintf (HEAP_BALANCE_TEMP_LOG, ("TEMPFailed to set the ideal processor for heap %d %d->%d",
                                org_hp->heap_number, (int)src_proc_no, (int)dst_proc_no));
                        }
#ifdef HEAP_BALANCE_INSTRUMENTATION
                        else
                        {
                            set_ideal_p = true;
                        }
#endif //HEAP_BALANCE_INSTRUMENTATION
                    }
                }

#ifdef HEAP_BALANCE_INSTRUMENTATION
                add_to_hb_numa (proc_no, ideal_proc_no_before_set_ideal,
                    final_alloc_hp_num, multiple_procs_p, alloc_count_p, set_ideal_p);
#endif //HEAP_BALANCE_INSTRUMENTATION
            }
        }
    }
    acontext->alloc_count++;
}

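// Returns a per-heap LOH "budget" used only to compare heaps against each other
// during LOH balancing. With a hard limit this is the LOH free list space minus
// what's already allocated on the (single) LOH segment; otherwise it is the
// remaining LOH allocation budget.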
ptrdiff_t gc_heap::get_balance_heaps_loh_effective_budget ()
{
    if (heap_hard_limit)
    {
        const ptrdiff_t free_list_space = generation_free_list_space (generation_of (max_generation + 1));
        heap_segment* seg = generation_start_segment (generation_of (max_generation + 1));
        assert (heap_segment_next (seg) == nullptr);
        const ptrdiff_t allocated = heap_segment_allocated (seg) - seg->mem;
        // We could calculate the actual end_of_seg_space by taking reserved - allocated,
        // but all heaps have the same reserved memory and this value is only used for comparison.
        return free_list_space - allocated;
    }
    else
    {
        return dd_new_allocation (dynamic_data_of (max_generation + 1));
    }
}

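// Picks the heap to satisfy a LOH allocation: the heap whose effective LOH
// budget exceeds the home heap's budget by at least delta. Heaps on the local
// NUMA node are considered first; remote nodes are only tried if no local heap
// wins, and with a larger delta.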
gc_heap* gc_heap::balance_heaps_loh (alloc_context* acontext, size_t alloc_size)
{
    const int home_hp_num = heap_select::select_heap(acontext);
    dprintf (3, ("[h%d] LA: %Id", home_hp_num, alloc_size));
    gc_heap* home_hp = GCHeap::GetHeap(home_hp_num)->pGenGCHeap;
    dynamic_data* dd = home_hp->dynamic_data_of (max_generation + 1);
    const ptrdiff_t home_hp_size = home_hp->get_balance_heaps_loh_effective_budget ();

    size_t delta = dd_min_size (dd) / 2;
    int start, end;
    heap_select::get_heap_range_for_heap(home_hp_num, &start, &end);
    const int finish = start + n_heaps;

try_again:
    gc_heap* max_hp = home_hp;
    ptrdiff_t max_size = home_hp_size + delta;

    dprintf (3, ("home hp: %d, max size: %d",
        home_hp_num,
        max_size));

    for (int i = start; i < end; i++)
    {
        gc_heap* hp = GCHeap::GetHeap(i%n_heaps)->pGenGCHeap;
        const ptrdiff_t size = hp->get_balance_heaps_loh_effective_budget ();

        dprintf (3, ("hp: %d, size: %d", hp->heap_number, size));
        if (size > max_size)
        {
            max_hp = hp;
            max_size = size;
            dprintf (3, ("max hp: %d, max size: %d",
                max_hp->heap_number,
                max_size));
        }
    }

    if ((max_hp == home_hp) && (end < finish))
    {
        start = end; end = finish;
        delta = dd_min_size (dd) * 3 / 2; // Make it harder to balance to remote nodes on NUMA.
        goto try_again;
    }

    if (max_hp != home_hp)
    {
        dprintf (3, ("loh: %d(%Id)->%d(%Id)",
            home_hp->heap_number, dd_new_allocation (home_hp->dynamic_data_of (max_generation + 1)),
            max_hp->heap_number, dd_new_allocation (max_hp->dynamic_data_of (max_generation + 1))));
    }

    return max_hp;
}

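// Retry path for LOH allocation under a hard limit: picks the heap whose single
// LOH segment has the most end-of-segment space, provided that space can hold
// at least alloc_size bytes. Remote NUMA nodes are only tried if no local heap
// qualifies. Returns nullptr if no heap has enough space.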
gc_heap* gc_heap::balance_heaps_loh_hard_limit_retry (alloc_context* acontext, size_t alloc_size)
{
    assert (heap_hard_limit);
    const int home_heap = heap_select::select_heap(acontext);
    dprintf (3, ("[h%d] balance_heaps_loh_hard_limit_retry alloc_size: %d", home_heap, alloc_size));
    int start, end;
    heap_select::get_heap_range_for_heap (home_heap, &start, &end);
    const int finish = start + n_heaps;

    gc_heap* max_hp = nullptr;
    size_t max_end_of_seg_space = alloc_size; // Must find at least this much end-of-segment space, or return nullptr.

try_again:
    {
        for (int i = start; i < end; i++)
        {
            gc_heap* hp = GCHeap::GetHeap (i%n_heaps)->pGenGCHeap;
            heap_segment* seg = generation_start_segment (hp->generation_of (max_generation + 1));
            // With a hard limit, there is only one segment.
            assert (heap_segment_next (seg) == nullptr);
            const size_t end_of_seg_space = heap_segment_reserved (seg) - heap_segment_allocated (seg);
            if (end_of_seg_space >= max_end_of_seg_space)
            {
                dprintf (3, ("Switching heaps in hard_limit_retry! To: [h%d], New end_of_seg_space: %d", hp->heap_number, end_of_seg_space));
                max_end_of_seg_space = end_of_seg_space;
                max_hp = hp;
            }
        }
    }

    // Only switch to a remote NUMA node if we didn't find space on this one.
    if ((max_hp == nullptr) && (end < finish))
    {
        start = end; end = finish;
        goto try_again;
    }

    return max_hp;
}
#endif //MULTIPLE_HEAPS

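// Grows an allocation context by at least `size` bytes. Keeps retrying while
// the chosen heap reports a_state_retry_allocate; for gen0 the context is first
// rebalanced across heaps, for LOH a heap is picked by budget (or, on a retry
// under a hard limit, by available end-of-segment space).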
BOOL gc_heap::allocate_more_space(alloc_context* acontext, size_t size,
                                   uint32_t flags, int alloc_generation_number)
{
    allocation_state status = a_state_start;
    do
    {
#ifdef MULTIPLE_HEAPS
        if (alloc_generation_number == 0)
        {
            balance_heaps (acontext);
            status = acontext->get_alloc_heap()->pGenGCHeap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
        }
        else
        {
            gc_heap* alloc_heap;
            if (heap_hard_limit && (status == a_state_retry_allocate))
            {
                alloc_heap = balance_heaps_loh_hard_limit_retry (acontext, size);
                if (alloc_heap == nullptr)
                {
                    return false;
                }
            }
            else
            {
                alloc_heap = balance_heaps_loh (acontext, size);
            }

            status = alloc_heap->try_allocate_more_space (acontext, size, flags, alloc_generation_number);
            if (status == a_state_retry_allocate)
            {
                dprintf (3, ("LOH h%d alloc retry!", alloc_heap->heap_number));
            }
        }
#else
        status = try_allocate_more_space (acontext, size, flags, alloc_generation_number);
#endif //MULTIPLE_HEAPS
    }
    while (status == a_state_retry_allocate);

    return (status == a_state_can_allocate);
}

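// Fast allocation path: bump alloc_ptr within the allocation context. If the
// bumped pointer would exceed alloc_limit, undo the bump, ask for more space
// (which may trigger a GC) and retry; returns 0 if no space can be obtained.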
inline
CObjectHeader* gc_heap::allocate (size_t jsize, alloc_context* acontext, uint32_t flags)
{
    size_t size = Align (jsize);
    assert (size >= Align (min_obj_size));
    {
    retry:
        uint8_t*  result = acontext->alloc_ptr;
        acontext->alloc_ptr+=size;
        if (acontext->alloc_ptr <= acontext->alloc_limit)
        {
            CObjectHeader* obj = (CObjectHeader*)result;
            assert (obj != 0);
            return obj;
        }
        else
        {
            acontext->alloc_ptr -= size;

#ifdef _MSC_VER
#pragma inline_depth(0)
#endif //_MSC_VER

            if (! allocate_more_space (acontext, size, flags, 0))
                return 0;

#ifdef _MSC_VER
#pragma inline_depth(20)
#endif //_MSC_VER

            goto retry;
        }
    }
}

void  gc_heap::leave_allocation_segment (generation* gen)
{
    adjust_limit (0, 0, gen, max_generation);
}

void gc_heap::init_free_and_plug()
{
#ifdef FREE_USAGE_STATS
    for (int i = 0; i <= settings.condemned_generation; i++)
    {
        generation* gen = generation_of (i);
        memset (gen->gen_free_spaces, 0, sizeof (gen->gen_free_spaces));
        memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
        memset (gen->gen_current_pinned_free_spaces, 0, sizeof (gen->gen_current_pinned_free_spaces));
    }

    if (settings.condemned_generation != max_generation)
    {
        for (int i = (settings.condemned_generation + 1); i <= max_generation; i++)
        {
            generation* gen = generation_of (i);
            memset (gen->gen_plugs, 0, sizeof (gen->gen_plugs));
        }
    }
#endif //FREE_USAGE_STATS
}

void gc_heap::print_free_and_plug (const char* msg)
{
#if defined(FREE_USAGE_STATS) && defined(SIMPLE_DPRINTF)
    int older_gen = ((settings.condemned_generation == max_generation) ? max_generation : (settings.condemned_generation + 1));
    for (int i = 0; i <= older_gen; i++)
    {
        generation* gen = generation_of (i);
        for (int j = 0; j < NUM_GEN_POWER2; j++)
        {
            if ((gen->gen_free_spaces[j] != 0) || (gen->gen_plugs[j] != 0))
            {
                dprintf (2, ("[%s][h%d][%s#%d]gen%d: 2^%d: F: %Id, P: %Id",
                    msg,
                    heap_number,
                    (settings.concurrent ? "BGC" : "GC"),
                    settings.gc_index,
                    i,
                    (j + 9), gen->gen_free_spaces[j], gen->gen_plugs[j]));
            }
        }
    }
#else
    UNREFERENCED_PARAMETER(msg);
#endif //FREE_USAGE_STATS && SIMPLE_DPRINTF
}

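// The FREE_USAGE_STATS helpers below keep power-of-two histograms of plug and
// free-space sizes per generation. The bucket index is found by doubling a
// running threshold that starts at BASE_GEN_SIZE until the size falls under it;
// roughly:
//   size <  BASE_GEN_SIZE                    -> bucket 0
//   size in [BASE_GEN_SIZE, 2*BASE_GEN_SIZE) -> bucket 1
//   ... and so on, doubling each time.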
void gc_heap::add_gen_plug (int gen_number, size_t plug_size)
{
#ifdef FREE_USAGE_STATS
    dprintf (3, ("adding plug size %Id to gen%d", plug_size, gen_number));
    generation* gen = generation_of (gen_number);
    size_t sz = BASE_GEN_SIZE;
    int i = 0;

    for (; i < NUM_GEN_POWER2; i++)
    {
        if (plug_size < sz)
        {
            break;
        }
        sz = sz * 2;
    }

    (gen->gen_plugs[i])++;
#else
    UNREFERENCED_PARAMETER(gen_number);
    UNREFERENCED_PARAMETER(plug_size);
#endif //FREE_USAGE_STATS
}

void gc_heap::add_item_to_current_pinned_free (int gen_number, size_t free_size)
{
#ifdef FREE_USAGE_STATS
    generation* gen = generation_of (gen_number);
    size_t sz = BASE_GEN_SIZE;
    int i = 0;

    for (; i < NUM_GEN_POWER2; i++)
    {
        if (free_size < sz)
        {
            break;
        }
        sz = sz * 2;
    }

    (gen->gen_current_pinned_free_spaces[i])++;
    generation_pinned_free_obj_space (gen) += free_size;
    dprintf (3, ("left pin free %Id(2^%d) to gen%d, total %Id bytes (%Id)",
        free_size, (i + 10), gen_number,
        generation_pinned_free_obj_space (gen),
        gen->gen_current_pinned_free_spaces[i]));
#else
    UNREFERENCED_PARAMETER(gen_number);
    UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}

void gc_heap::add_gen_free (int gen_number, size_t free_size)
{
#ifdef FREE_USAGE_STATS
    dprintf (3, ("adding free size %Id to gen%d", free_size, gen_number));
    generation* gen = generation_of (gen_number);
    size_t sz = BASE_GEN_SIZE;
    int i = 0;

    for (; i < NUM_GEN_POWER2; i++)
    {
        if (free_size < sz)
        {
            break;
        }
        sz = sz * 2;
    }

    (gen->gen_free_spaces[i])++;
#else
    UNREFERENCED_PARAMETER(gen_number);
    UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}

void gc_heap::remove_gen_free (int gen_number, size_t free_size)
{
#ifdef FREE_USAGE_STATS
    dprintf (3, ("removing free %Id from gen%d", free_size, gen_number));
    generation* gen = generation_of (gen_number);
    size_t sz = BASE_GEN_SIZE;
    int i = 0;

    for (; i < NUM_GEN_POWER2; i++)
    {
        if (free_size < sz)
        {
            break;
        }
        sz = sz * 2;
    }

    (gen->gen_free_spaces[i])--;
#else
    UNREFERENCED_PARAMETER(gen_number);
    UNREFERENCED_PARAMETER(free_size);
#endif //FREE_USAGE_STATS
}

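// During the plan phase, finds space for a plug of `size` bytes in the older
// generation `gen` (which must be generation from_gen_number + 1): first the
// current allocation context, then the free list buckets (discarding items that
// can't be used where appropriate), and finally the committed/reserved space at
// the end of each non-ephemeral segment. Returns 0 if nothing fits.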
uint8_t* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
                                             int from_gen_number,
                                             uint8_t* old_loc REQD_ALIGN_AND_OFFSET_DCL)
{
    size = Align (size);
    assert (size >= Align (min_obj_size));
    assert (from_gen_number < max_generation);
    assert (from_gen_number >= 0);
    assert (generation_of (from_gen_number + 1) == gen);

    allocator* gen_allocator = generation_allocator (gen);
    BOOL discard_p = gen_allocator->discard_if_no_fit_p ();
    int pad_in_front = ((old_loc != 0) && ((from_gen_number+1) != max_generation)) ? USE_PADDING_FRONT : 0;

    size_t real_size = size + Align (min_obj_size);
    if (pad_in_front)
        real_size += Align (min_obj_size);

    if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
                       generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)))
    {
        size_t sz_list = gen_allocator->first_bucket_size();
        for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
        {
            if ((real_size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
            {
                uint8_t* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
                uint8_t* prev_free_item = 0;
                while (free_list != 0)
                {
                    dprintf (3, ("considering free list %Ix", (size_t)free_list));

                    size_t free_list_size = unused_array_size (free_list);

                    if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + free_list_size),
                                    old_loc, USE_PADDING_TAIL | pad_in_front))
                    {
                        dprintf (4, ("F:%Ix-%Id",
                                     (size_t)free_list, free_list_size));

                        gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, !discard_p);
                        generation_free_list_space (gen) -= free_list_size;
                        remove_gen_free (gen->gen_num, free_list_size);

                        adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
                        generation_allocate_end_seg_p (gen) = FALSE;
                        goto finished;
                    }
                    // We do first fit on bucket 0 because it's not guaranteed to contain a fit;
                    // items there that don't fit (and all non-fitting items when discard_p) are discarded.
                    else if (discard_p || (a_l_idx == 0))
                    {
                        dprintf (3, ("couldn't use this free area, discarding"));
                        generation_free_obj_space (gen) += free_list_size;

                        gen_allocator->unlink_item (a_l_idx, free_list, prev_free_item, FALSE);
                        generation_free_list_space (gen) -= free_list_size;
                        remove_gen_free (gen->gen_num, free_list_size);
                    }
                    else
                    {
                        prev_free_item = free_list;
                    }
                    free_list = free_list_slot (free_list);
                }
            }
            sz_list = sz_list * 2;
        }
        //go back to the beginning of the segment list
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
        if (seg != generation_allocation_segment (gen))
        {
            leave_allocation_segment (gen);
            generation_allocation_segment (gen) = seg;
        }
        while (seg != ephemeral_heap_segment)
        {
            if (size_fit_p(size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
                           heap_segment_committed (seg), old_loc, USE_PADDING_TAIL | pad_in_front))
            {
                dprintf (3, ("using what's left in committed"));
                adjust_limit (heap_segment_plan_allocated (seg),
                              heap_segment_committed (seg) -
                              heap_segment_plan_allocated (seg),
                              gen, from_gen_number+1);
                generation_allocate_end_seg_p (gen) = TRUE;
                // dformat (t, 3, "Expanding segment allocation");
                heap_segment_plan_allocated (seg) =
                    heap_segment_committed (seg);
                goto finished;
            }
            else
            {
                if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, heap_segment_plan_allocated (seg),
                                heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
                    grow_heap_segment (seg, heap_segment_plan_allocated (seg), old_loc, size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG))
                {
                    dprintf (3, ("using what's left in reserved"));
                    adjust_limit (heap_segment_plan_allocated (seg),
                                  heap_segment_committed (seg) -
                                  heap_segment_plan_allocated (seg),
                                  gen, from_gen_number+1);
                    generation_allocate_end_seg_p (gen) = TRUE;
                    heap_segment_plan_allocated (seg) =
                        heap_segment_committed (seg);

                    goto finished;
                }
                else
                {
                    leave_allocation_segment (gen);
                    heap_segment*   next_seg = heap_segment_next_rw (seg);
                    if (next_seg)
                    {
                        dprintf (3, ("getting next segment"));
                        generation_allocation_segment (gen) = next_seg;
                        generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
                        generation_allocation_limit (gen) = generation_allocation_pointer (gen);
                    }
                    else
                    {
                        size = 0;
                        goto finished;
                    }
                }
            }
            seg = generation_allocation_segment (gen);
        }
        //No need to fix the last region. Will be done later
        size = 0;
        goto finished;
    }
    finished:
    if (0 == size)
    {
        return 0;
    }
    else
    {
        uint8_t*  result = generation_allocation_pointer (gen);
        size_t pad = 0;

#ifdef SHORT_PLUGS
        if ((pad_in_front & USE_PADDING_FRONT) &&
            (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
             ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
        {
            pad = Align (min_obj_size);
            set_plug_padded (old_loc);
        }
#endif //SHORT_PLUGS

#ifdef FEATURE_STRUCTALIGN
        _ASSERTE(!old_loc || alignmentOffset != 0);
        _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
        if (old_loc != 0)
        {
            size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
            set_node_aligninfo (old_loc, requiredAlignment, pad1);
            pad += pad1;
        }
#else // FEATURE_STRUCTALIGN
        if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
        {
            pad += switch_alignment_size (is_plug_padded (old_loc));
            set_node_realigned (old_loc);
            dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
                         (size_t)old_loc, (size_t)(result+pad)));
            assert (same_large_alignment_p (result + pad, old_loc));
        }
#endif // FEATURE_STRUCTALIGN
        dprintf (3, ("Allocate %Id bytes", size));

        if ((old_loc == 0) || (pad != 0))
        {
            //allocating a non plug or a gap, so reset the start region
            generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
        }

        generation_allocation_pointer (gen) += size + pad;
        assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
        if (generation_allocate_end_seg_p (gen))
        {
            generation_end_seg_allocated (gen) += size;
        }
        else
        {
            generation_free_list_allocated (gen) += size;
        }
        generation_allocation_size (gen) += size;

        dprintf (3, ("aio: ptr: %Ix, limit: %Ix, sr: %Ix",
            generation_allocation_pointer (gen), generation_allocation_limit (gen),
            generation_allocation_context_start_region (gen)));

        return result + pad;
    }
}

void gc_heap::repair_allocation_in_expanded_heap (generation* consing_gen)
{
    //make sure that every generation has a planned allocation start
    int  gen_number = max_generation - 1;
    while (gen_number>= 0)
    {
        generation* gen = generation_of (gen_number);
        if (0 == generation_plan_allocation_start (gen))
        {
            realloc_plan_generation_start (gen, consing_gen);

            assert (generation_plan_allocation_start (gen));
        }
        gen_number--;
    }

    // now we know the planned allocation size
    size_t  size = (generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
    heap_segment* seg = generation_allocation_segment (consing_gen);
    if (generation_allocation_limit (consing_gen) == heap_segment_plan_allocated (seg))
    {
        if (size != 0)
        {
            heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);
        }
    }
    else
    {
        assert (settings.condemned_generation == max_generation);
        uint8_t* first_address = generation_allocation_limit (consing_gen);
        //look through the pinned plugs for relevant ones.
        //Look for the right pinned plug to start from.
        size_t mi = 0;
        mark* m = 0;
        while (mi != mark_stack_tos)
        {
            m = pinned_plug_of (mi);
            if ((pinned_plug (m) == first_address))
                break;
            else
                mi++;
        }
        assert (mi != mark_stack_tos);
        pinned_len (m) = size;
    }
}

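// Finds space for reallocating a plug of `size` bytes into the expanded heap:
// via the precomputed bestfit mapping when in use, otherwise in the current
// allocation context, in the free space in front of a pinned plug, or at the
// end of the planned segment. adjacentp is set to TRUE only when the plug fits
// directly at the current allocation pointer without realignment.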
// TODO (defrag): optimize for the new-segment case (plan_allocated == mem).
uint8_t* gc_heap::allocate_in_expanded_heap (generation* gen,
                                          size_t size,
                                          BOOL& adjacentp,
                                          uint8_t* old_loc,
#ifdef SHORT_PLUGS
                                          BOOL set_padding_on_saved_p,
                                          mark* pinned_plug_entry,
#endif //SHORT_PLUGS
                                          BOOL consider_bestfit,
                                          int active_new_gen_number
                                          REQD_ALIGN_AND_OFFSET_DCL)
{
    UNREFERENCED_PARAMETER(active_new_gen_number);
    dprintf (3, ("aie: P: %Ix, size: %Ix", old_loc, size));

    size = Align (size);
    assert (size >= Align (min_obj_size));
    int pad_in_front = ((old_loc != 0) && (active_new_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;

    if (consider_bestfit && use_bestfit)
    {
        assert (bestfit_seg);
        dprintf (SEG_REUSE_LOG_1, ("reallocating 0x%Ix in expanded heap, size: %Id",
                    old_loc, size));
        return bestfit_seg->fit (old_loc,
                                 size REQD_ALIGN_AND_OFFSET_ARG);
    }

    heap_segment* seg = generation_allocation_segment (gen);

    if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
                       generation_allocation_limit (gen), old_loc,
                       ((generation_allocation_limit (gen) !=
                          heap_segment_plan_allocated (seg))? USE_PADDING_TAIL : 0) | pad_in_front)))
    {
        dprintf (3, ("aie: can't fit: ptr: %Ix, limit: %Ix", generation_allocation_pointer (gen),
            generation_allocation_limit (gen)));

        adjacentp = FALSE;
        uint8_t* first_address = (generation_allocation_limit (gen) ?
                               generation_allocation_limit (gen) :
                               heap_segment_mem (seg));
        assert (in_range_for_segment (first_address, seg));

        uint8_t* end_address   = heap_segment_reserved (seg);

        dprintf (3, ("aie: first_addr: %Ix, gen alloc limit: %Ix, end_address: %Ix",
            first_address, generation_allocation_limit (gen), end_address));

        size_t mi = 0;
        mark* m = 0;

        if (heap_segment_allocated (seg) != heap_segment_mem (seg))
        {
            assert (settings.condemned_generation == max_generation);
            //look through the pinned plugs for relevant ones.
            //Look for the right pinned plug to start from.
            while (mi != mark_stack_tos)
            {
                m = pinned_plug_of (mi);
                if ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address))
                {
                    dprintf (3, ("aie: found pin: %Ix", pinned_plug (m)));
                    break;
                }
                else
                    mi++;
            }
            if (mi != mark_stack_tos)
            {
                //fix old free list.
                size_t  hsize = (generation_allocation_limit (gen) - generation_allocation_pointer (gen));
                {
                    dprintf(3,("gc filling up hole"));
                    ptrdiff_t mi1 = (ptrdiff_t)mi;
                    while ((mi1 >= 0) &&
                           (pinned_plug (pinned_plug_of(mi1)) != generation_allocation_limit (gen)))
                    {
                        dprintf (3, ("aie: checking pin %Ix", pinned_plug (pinned_plug_of(mi1))));
                        mi1--;
                    }
                    if (mi1 >= 0)
                    {
                        size_t saved_pinned_len = pinned_len (pinned_plug_of(mi1));
                        pinned_len (pinned_plug_of(mi1)) = hsize;
                        dprintf (3, ("changing %Ix len %Ix->%Ix",
                            pinned_plug (pinned_plug_of(mi1)),
                            saved_pinned_len, pinned_len (pinned_plug_of(mi1))));
                    }
                }
            }
        }
        else
        {
            assert (generation_allocation_limit (gen) ==
                    generation_allocation_pointer (gen));
            mi = mark_stack_tos;
        }

        while ((mi != mark_stack_tos) && in_range_for_segment (pinned_plug (m), seg))
        {
            size_t len = pinned_len (m);
            uint8_t*  free_list = (pinned_plug (m) - len);
            dprintf (3, ("aie: testing free item: %Ix->%Ix(%Ix)",
                free_list, (free_list + len), len));
            if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, free_list, (free_list + len), old_loc, USE_PADDING_TAIL | pad_in_front))
            {
                dprintf (3, ("aie: Found adequate unused area: %Ix, size: %Id",
                            (size_t)free_list, len));
                {
                    generation_allocation_pointer (gen) = free_list;
                    generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
                    generation_allocation_limit (gen) = (free_list + len);
                }
                goto allocate_in_free;
            }
            mi++;
            m = pinned_plug_of (mi);
        }

        //switch to the end of the segment.
        generation_allocation_pointer (gen) = heap_segment_plan_allocated (seg);
        generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
        heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
        generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
        dprintf (3, ("aie: switching to end of seg: %Ix->%Ix(%Ix)",
            generation_allocation_pointer (gen), generation_allocation_limit (gen),
            (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

        if (!size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
                         generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front))
        {
            dprintf (3, ("aie: ptr: %Ix, limit: %Ix, can't alloc", generation_allocation_pointer (gen),
                generation_allocation_limit (gen)));
            assert (!"Can't allocate if no free space");
            return 0;
        }
    }
    else
    {
        adjacentp = TRUE;
    }

allocate_in_free:
    {
        uint8_t*  result = generation_allocation_pointer (gen);
        size_t pad = 0;

#ifdef SHORT_PLUGS
        if ((pad_in_front & USE_PADDING_FRONT) &&
            (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
             ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
        {
            pad = Align (min_obj_size);
            set_padding_in_expand (old_loc, set_padding_on_saved_p, pinned_plug_entry);
        }
#endif //SHORT_PLUGS

#ifdef FEATURE_STRUCTALIGN
        _ASSERTE(!old_loc || alignmentOffset != 0);
        _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
        if (old_loc != 0)
        {
            size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
            set_node_aligninfo (old_loc, requiredAlignment, pad1);
            pad += pad1;
            adjacentp = FALSE;
        }
#else // FEATURE_STRUCTALIGN
        if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
        {
            pad += switch_alignment_size (is_plug_padded (old_loc));
            set_node_realigned (old_loc);
            dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
                         (size_t)old_loc, (size_t)(result+pad)));
            assert (same_large_alignment_p (result + pad, old_loc));
            adjacentp = FALSE;
        }
#endif // FEATURE_STRUCTALIGN

        if ((old_loc == 0) || (pad != 0))
        {
            //allocating a non plug or a gap, so reset the start region
            generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
        }

        generation_allocation_pointer (gen) += size + pad;
        assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));
        dprintf (3, ("Allocated in expanded heap %Ix:%Id", (size_t)(result+pad), size));

        dprintf (3, ("aie: ptr: %Ix, limit: %Ix, sr: %Ix",
            generation_allocation_pointer (gen), generation_allocation_limit (gen),
            generation_allocation_context_start_region (gen)));

        return result + pad;
    }
}

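// If the consing generation is still allocating on a non-ephemeral segment,
// seals that segment's planned allocation at the current pointer and switches
// consing to generation (max_generation - 1), positioned at the start of the
// ephemeral segment; otherwise returns the consing generation unchanged.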
generation*  gc_heap::ensure_ephemeral_heap_segment (generation* consing_gen)
{
    heap_segment* seg = generation_allocation_segment (consing_gen);
    if (seg != ephemeral_heap_segment)
    {
        assert (generation_allocation_pointer (consing_gen)>= heap_segment_mem (seg));
        assert (generation_allocation_pointer (consing_gen)<= heap_segment_committed (seg));

        //fix the allocated size of the segment.
        heap_segment_plan_allocated (seg) = generation_allocation_pointer (consing_gen);

        generation* new_consing_gen = generation_of (max_generation - 1);
        generation_allocation_pointer (new_consing_gen) =
                heap_segment_mem (ephemeral_heap_segment);
        generation_allocation_limit (new_consing_gen) =
            generation_allocation_pointer (new_consing_gen);
        generation_allocation_context_start_region (new_consing_gen) =
            generation_allocation_pointer (new_consing_gen);
        generation_allocation_segment (new_consing_gen) = ephemeral_heap_segment;

        return new_consing_gen;
    }
    else
        return consing_gen;
}

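// Allocates space for a plug being compacted into the condemned generations.
// Walks the planned allocation of `gen`, consuming the free space recorded in
// front of pinned plugs and advancing the limit/segment as needed; under
// SHORT_PLUGS a plug may be converted to pinned when the required padding would
// run into the next pinned plug. Returns the new address (past any padding), or
// 0 when the plug was converted to pinned or the segment chain is exhausted.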
uint8_t* gc_heap::allocate_in_condemned_generations (generation* gen,
                                                  size_t size,
                                                  int from_gen_number,
#ifdef SHORT_PLUGS
                                                  BOOL* convert_to_pinned_p,
                                                  uint8_t* next_pinned_plug,
                                                  heap_segment* current_seg,
#endif //SHORT_PLUGS
                                                  uint8_t* old_loc
                                                  REQD_ALIGN_AND_OFFSET_DCL)
{
    // Make sure that the youngest generation gap hasn't been allocated
    if (settings.promotion)
    {
        assert (generation_plan_allocation_start (youngest_generation) == 0);
    }

    size = Align (size);
    assert (size >= Align (min_obj_size));
    int to_gen_number = from_gen_number;
    if (from_gen_number != (int)max_generation)
    {
        to_gen_number = from_gen_number + (settings.promotion ? 1 : 0);
    }

    dprintf (3, ("aic gen%d: s: %Id", gen->gen_num, size));

    int pad_in_front = ((old_loc != 0) && (to_gen_number != max_generation)) ? USE_PADDING_FRONT : 0;

    if ((from_gen_number != -1) && (from_gen_number != (int)max_generation) && settings.promotion)
    {
        generation_condemned_allocated (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
        generation_allocation_size (generation_of (from_gen_number + (settings.promotion ? 1 : 0))) += size;
    }
retry:
    {
        heap_segment* seg = generation_allocation_segment (gen);
        if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
                           generation_allocation_limit (gen), old_loc,
                           ((generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))?USE_PADDING_TAIL:0)|pad_in_front)))
        {
            if ((! (pinned_plug_que_empty_p()) &&
                 (generation_allocation_limit (gen) ==
                  pinned_plug (oldest_pin()))))
            {
                size_t entry = deque_pinned_plug();
                mark* pinned_plug_entry = pinned_plug_of (entry);
                size_t len = pinned_len (pinned_plug_entry);
                uint8_t* plug = pinned_plug (pinned_plug_entry);
                set_new_pin_info (pinned_plug_entry, generation_allocation_pointer (gen));

#ifdef FREE_USAGE_STATS
                generation_allocated_in_pinned_free (gen) += generation_allocated_since_last_pin (gen);
                dprintf (3, ("allocated %Id so far within pin %Ix, total->%Id",
                    generation_allocated_since_last_pin (gen),
                    plug,
                    generation_allocated_in_pinned_free (gen)));
                generation_allocated_since_last_pin (gen) = 0;

                add_item_to_current_pinned_free (gen->gen_num, pinned_len (pinned_plug_of (entry)));
#endif //FREE_USAGE_STATS

                dprintf (3, ("mark stack bos: %Id, tos: %Id, aic: p %Ix len: %Ix->%Ix",
                    mark_stack_bos, mark_stack_tos, plug, len, pinned_len (pinned_plug_of (entry))));

                assert(mark_stack_array[entry].len == 0 ||
                       mark_stack_array[entry].len >= Align(min_obj_size));
                generation_allocation_pointer (gen) = plug + len;
                generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                set_allocator_next_pin (gen);

                //Add the size of the pinned plug to the right pinned allocations
                //find out which gen this pinned plug came from
                int frgn = object_gennum (plug);
                if ((frgn != (int)max_generation) && settings.promotion)
                {
                    generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
                    int togn = object_gennum_plan (plug);
                    if (frgn < togn)
                    {
                        generation_pinned_allocation_compact_size (generation_of (togn)) += len;
                    }
                }
                goto retry;
            }

            if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
            {
                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                dprintf (3, ("changed limit to plan alloc: %Ix", generation_allocation_limit (gen)));
            }
            else
            {
                if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
                {
                    heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                    generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                    dprintf (3, ("changed limit to commit: %Ix", generation_allocation_limit (gen)));
                }
                else
                {
#ifndef RESPECT_LARGE_ALIGNMENT
                    assert (gen != youngest_generation);
#endif //RESPECT_LARGE_ALIGNMENT

                    if (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
                                    heap_segment_reserved (seg), old_loc, USE_PADDING_TAIL | pad_in_front) &&
                        (grow_heap_segment (seg, generation_allocation_pointer (gen), old_loc,
                                            size, pad_in_front REQD_ALIGN_AND_OFFSET_ARG)))
                    {
                        dprintf (3, ("Expanded segment allocation by committing more memory"));
                        heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                        generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                    }
                    else
                    {
                        heap_segment*   next_seg = heap_segment_next (seg);
                        assert (generation_allocation_pointer (gen)>=
                                heap_segment_mem (seg));
                        // Verify that all pinned plugs for this segment are consumed
                        if (!pinned_plug_que_empty_p() &&
                            ((pinned_plug (oldest_pin()) <
                              heap_segment_allocated (seg)) &&
                             (pinned_plug (oldest_pin()) >=
                              generation_allocation_pointer (gen))))
                        {
                            LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
                                         pinned_plug (oldest_pin())));
                            FATAL_GC_ERROR();
                        }
                        assert (generation_allocation_pointer (gen)>=
                                heap_segment_mem (seg));
                        assert (generation_allocation_pointer (gen)<=
                                heap_segment_committed (seg));
                        heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);

                        if (next_seg)
                        {
                            generation_allocation_segment (gen) = next_seg;
                            generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
                            generation_allocation_limit (gen) = generation_allocation_pointer (gen);
                            generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
                        }
                        else
                        {
                            // This should only happen during allocation of the generation 0 gap;
                            // in that case we are going to grow the heap anyway.
                            return 0;
                        }
                    }
                }
            }
            set_allocator_next_pin (gen);

            goto retry;
        }
    }

    {
        assert (generation_allocation_pointer (gen)>=
                heap_segment_mem (generation_allocation_segment (gen)));
        uint8_t* result = generation_allocation_pointer (gen);
        size_t pad = 0;
#ifdef SHORT_PLUGS
        if ((pad_in_front & USE_PADDING_FRONT) &&
            (((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))==0) ||
             ((generation_allocation_pointer (gen) - generation_allocation_context_start_region (gen))>=DESIRED_PLUG_LENGTH)))
        {
            ptrdiff_t dist = old_loc - result;
            if (dist == 0)
            {
                dprintf (3, ("old alloc: %Ix, same as new alloc, not padding", old_loc));
                pad = 0;
            }
            else
            {
                if ((dist > 0) && (dist < (ptrdiff_t)Align (min_obj_size)))
                {
                    dprintf (3, ("old alloc: %Ix, only %d bytes > new alloc! Shouldn't happen", old_loc, dist));
                    FATAL_GC_ERROR();
                }

                pad = Align (min_obj_size);
                set_plug_padded (old_loc);
            }
        }
#endif //SHORT_PLUGS
#ifdef FEATURE_STRUCTALIGN
        _ASSERTE(!old_loc || alignmentOffset != 0);
        _ASSERTE(old_loc || requiredAlignment == DATA_ALIGNMENT);
        if ((old_loc != 0))
        {
            size_t pad1 = ComputeStructAlignPad(result+pad, requiredAlignment, alignmentOffset);
            set_node_aligninfo (old_loc, requiredAlignment, pad1);
            pad += pad1;
        }
#else // FEATURE_STRUCTALIGN
        if (!((old_loc == 0) || same_large_alignment_p (old_loc, result+pad)))
        {
            pad += switch_alignment_size (is_plug_padded (old_loc));
            set_node_realigned(old_loc);
            dprintf (3, ("Allocation realignment old_loc: %Ix, new_loc:%Ix",
                         (size_t)old_loc, (size_t)(result+pad)));
            assert (same_large_alignment_p (result + pad, old_loc));
        }
#endif // FEATURE_STRUCTALIGN

#ifdef SHORT_PLUGS
        if ((next_pinned_plug != 0) && (pad != 0) && (generation_allocation_segment (gen) == current_seg))
        {
            assert (old_loc != 0);
            ptrdiff_t dist_to_next_pin = (ptrdiff_t)(next_pinned_plug - (generation_allocation_pointer (gen) + size + pad));
            assert (dist_to_next_pin >= 0);

            if ((dist_to_next_pin >= 0) && (dist_to_next_pin < (ptrdiff_t)Align (min_obj_size)))
            {
                dprintf (3, ("%Ix->(%Ix,%Ix),%Ix(%Ix)(%Ix),NP->PP",
                    old_loc,
                    generation_allocation_pointer (gen),
                    generation_allocation_limit (gen),
                    next_pinned_plug,
                    size,
                    dist_to_next_pin));
                clear_plug_padded (old_loc);
                pad = 0;
                *convert_to_pinned_p = TRUE;
                record_interesting_data_point (idp_converted_pin);

                return 0;
            }
        }
#endif //SHORT_PLUGS

        if ((old_loc == 0) || (pad != 0))
        {
            //allocating a non plug or a gap, so reset the start region
            generation_allocation_context_start_region (gen) = generation_allocation_pointer (gen);
        }

        generation_allocation_pointer (gen) += size + pad;
        assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));

#ifdef FREE_USAGE_STATS
        generation_allocated_since_last_pin (gen) += size;
#endif //FREE_USAGE_STATS

        dprintf (3, ("aic: ptr: %Ix, limit: %Ix, sr: %Ix",
            generation_allocation_pointer (gen), generation_allocation_limit (gen),
            generation_allocation_context_start_region (gen)));

        assert (result + pad);
        return result + pad;
    }
}

inline int power (int x, int y)
{
    int z = 1;
    for (int i = 0; i < y; i++)
    {
        z = z*x;
    }
    return z;
}

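// Joined (agreed upon across all heaps) final decision of which generation to
// condemn and whether the collection must be blocking. Adjusts the per-heap
// decision for elevation locking, provisional mode, hard-limit-driven LOH
// compaction, BGC servo tuning and GC stress.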
int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
                                           int initial_gen,
                                           int current_gen,
                                           BOOL* blocking_collection_p
                                           STRESS_HEAP_ARG(int n_original))
{
#ifdef BGC_SERVO_TUNING
    if (settings.entry_memory_load == 0)
    {
        uint32_t current_memory_load = 0;
        uint64_t current_available_physical = 0;
        get_memory_info (&current_memory_load, &current_available_physical);

        settings.entry_memory_load = current_memory_load;
        settings.entry_available_physical_mem = current_available_physical;
    }
#endif //BGC_SERVO_TUNING

    int n = current_gen;
#ifdef MULTIPLE_HEAPS
    BOOL joined_last_gc_before_oom = FALSE;
    for (int i = 0; i < n_heaps; i++)
    {
        if (g_heaps[i]->last_gc_before_oom)
        {
            dprintf (GTC_LOG, ("h%d is setting blocking to TRUE", i));
            joined_last_gc_before_oom = TRUE;
            break;
        }
    }
#else
    BOOL joined_last_gc_before_oom = last_gc_before_oom;
#endif //MULTIPLE_HEAPS

    if (joined_last_gc_before_oom && settings.pause_mode != pause_low_latency)
    {
        assert (*blocking_collection_p);
    }

    if (should_evaluate_elevation && (n == max_generation))
    {
        dprintf (GTC_LOG, ("lock: %d(%d)",
            (settings.should_lock_elevation ? 1 : 0),
            settings.elevation_locked_count));

        if (settings.should_lock_elevation)
        {
            settings.elevation_locked_count++;
            if (settings.elevation_locked_count == 6)
            {
                settings.elevation_locked_count = 0;
            }
            else
            {
                n = max_generation - 1;
                settings.elevation_reduced = TRUE;
            }
        }
        else
        {
            settings.elevation_locked_count = 0;
        }
    }
    else
    {
        settings.should_lock_elevation = FALSE;
        settings.elevation_locked_count = 0;
    }

    if (provisional_mode_triggered && (n == max_generation))
    {
        // There are a few cases where we should not reduce the generation.
        if ((initial_gen == max_generation) || (settings.reason == reason_alloc_loh))
        {
            // If we are doing a full GC in the provisional mode, we always
            // make it blocking because we don't want to get into a situation
            // where foreground GCs are asking for a compacting full GC right away
            // and not getting it.
            dprintf (GTC_LOG, ("full GC induced, not reducing gen"));
            *blocking_collection_p = TRUE;
        }
        else if (should_expand_in_full_gc || joined_last_gc_before_oom)
        {
            dprintf (GTC_LOG, ("need full blocking GCs to expand heap or avoid OOM, not reducing gen"));
            assert (*blocking_collection_p);
        }
        else
        {
            dprintf (GTC_LOG, ("reducing gen in PM: %d->%d->%d", initial_gen, n, (max_generation - 1)));
            n = max_generation - 1;
        }
    }

    if (should_expand_in_full_gc)
    {
        should_expand_in_full_gc = FALSE;
    }

    if (heap_hard_limit)
    {
        // If we have already consumed 90% of the limit, check whether we should compact LOH.
        // TODO: should unify this with gen2.
        dprintf (GTC_LOG, ("committed %Id is %d%% of limit %Id",
            current_total_committed, (int)((float)current_total_committed * 100.0 / (float)heap_hard_limit),
            heap_hard_limit));

        bool full_compact_gc_p = false;

        if (joined_last_gc_before_oom)
        {
            full_compact_gc_p = true;
        }
        else if ((current_total_committed * 10) >= (heap_hard_limit * 9))
        {
            size_t loh_frag = get_total_gen_fragmentation (max_generation + 1);

            // If the LOH frag is >= 1/8 it's worth compacting it
            if ((loh_frag * 8) >= heap_hard_limit)
            {
                dprintf (GTC_LOG, ("loh frag: %Id > 1/8 of limit %Id", loh_frag, (heap_hard_limit / 8)));
                full_compact_gc_p = true;
            }
            else
            {
                // If there's not much fragmentation but it looks like it'll be productive to
                // collect LOH, do that.
                size_t est_loh_reclaim = get_total_gen_estimated_reclaim (max_generation + 1);
                full_compact_gc_p = ((est_loh_reclaim * 8) >= heap_hard_limit);
                dprintf (GTC_LOG, ("loh est reclaim: %Id, 1/8 of limit %Id", est_loh_reclaim, (heap_hard_limit / 8)));
            }
        }

        if (full_compact_gc_p)
        {
            n = max_generation;
            *blocking_collection_p = TRUE;
            settings.loh_compaction = TRUE;
            dprintf (GTC_LOG, ("compacting LOH due to hard limit"));
        }
    }

#ifdef BGC_SERVO_TUNING
    if (bgc_tuning::should_trigger_ngc2())
    {
        n = max_generation;
        *blocking_collection_p = TRUE;
    }

    if ((n < max_generation) && !recursive_gc_sync::background_running_p() &&
        bgc_tuning::stepping_trigger (settings.entry_memory_load, get_current_gc_index (max_generation)))
    {
        n = max_generation;
        saved_bgc_tuning_reason = reason_bgc_stepping;
    }

    if ((n < max_generation) && bgc_tuning::should_trigger_bgc())
    {
        n = max_generation;
    }

    if (n == (max_generation - 1))
    {
        if (bgc_tuning::should_delay_alloc (max_generation))
        {
            n -= 1;
        }
    }
#endif //BGC_SERVO_TUNING

    if ((n == max_generation) && (*blocking_collection_p == FALSE))
    {
        // If we are doing a gen2 we should reset elevation regardless and let the gen2
        // decide if we should lock again or in the bgc case by design we will not retract
        // gen1 start.
        settings.should_lock_elevation = FALSE;
        settings.elevation_locked_count = 0;
        dprintf (1, ("doing bgc, reset elevation"));
    }

#ifdef STRESS_HEAP
#ifdef BACKGROUND_GC
    // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
    // generations to be collected.
    //
    // [LOCALGC TODO] STRESS_HEAP is not defined for a standalone GC so there are multiple
    // things that need to be fixed in this code block.
    if (n_original != max_generation &&
        g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
    {
#ifndef FEATURE_REDHAWK
        // for the GC stress mix mode throttle down gen2 collections
        if (g_pConfig->IsGCStressMix())
        {
            size_t current_gc_count = 0;

#ifdef MULTIPLE_HEAPS
            current_gc_count = (size_t)dd_collection_count (g_heaps[0]->dynamic_data_of (0));
#else
            current_gc_count = (size_t)dd_collection_count (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS
            // in gc stress, only escalate every 10th non-gen2 collection to a gen2...
            if ((current_gc_count % 10) == 0)
            {
                n = max_generation;
            }
        }
        // for traditional GC stress
        else if (*blocking_collection_p)
        {
            // We call StressHeap() a lot for Concurrent GC Stress. However,
            // if we cannot do a concurrent collection, there is no need to stress anymore.
            // @TODO: Enable stress when the memory pressure goes down again
            GCStressPolicy::GlobalDisable();
        }
        else
#endif // !FEATURE_REDHAWK
        {
            n = max_generation;
        }
    }
#endif //BACKGROUND_GC
#endif //STRESS_HEAP

    return n;
}

inline
size_t get_survived_size (gc_history_per_heap* hist)
{
    size_t surv_size = 0;
    gc_generation_data* gen_data;

    for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
    {
        gen_data = &(hist->gen_data[gen_number]);
        surv_size += (gen_data->size_after -
                      gen_data->free_list_space_after -
                      gen_data->free_obj_space_after);
    }

    return surv_size;
}

size_t gc_heap::get_total_survived_size()
{
    size_t total_surv_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        gc_history_per_heap* current_gc_data_per_heap = hp->get_gc_data_per_heap();
        total_surv_size += get_survived_size (current_gc_data_per_heap);
    }
#else
    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
    total_surv_size = get_survived_size (current_gc_data_per_heap);
#endif //MULTIPLE_HEAPS
    return total_surv_size;
}

size_t gc_heap::get_total_allocated_since_last_gc()
{
    size_t total_allocated_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        total_allocated_size += hp->allocated_since_last_gc;
        hp->allocated_since_last_gc = 0;
    }
#else
    total_allocated_size = allocated_since_last_gc;
    allocated_since_last_gc = 0;
#endif //MULTIPLE_HEAPS
    return total_allocated_size;
}

// Gets what's allocated on both SOH and LOH that hasn't been collected.
size_t gc_heap::get_current_allocated()
{
    dynamic_data* dd = dynamic_data_of (0);
    size_t current_alloc = dd_desired_allocation (dd) - dd_new_allocation (dd);
    dd = dynamic_data_of (max_generation + 1);
    current_alloc += dd_desired_allocation (dd) - dd_new_allocation (dd);

    return current_alloc;
}
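// For example (hypothetical numbers): if gen0's desired allocation for this cycle is
// 64MB and dd_new_allocation has dropped to 24MB, then 40MB has been allocated from the
// gen0 budget; the LOH term is computed the same way and added in.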

size_t gc_heap::get_total_allocated()
{
    size_t total_current_allocated = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        total_current_allocated += hp->get_current_allocated();
    }
#else
    total_current_allocated = get_current_allocated();
#endif //MULTIPLE_HEAPS
    return total_current_allocated;
}

#ifdef BGC_SERVO_TUNING
size_t gc_heap::get_total_generation_size (int gen_number)
{
    size_t total_generation_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        total_generation_size += hp->generation_size (gen_number);
    }
    return total_generation_size;
}

// gets all that's allocated into the gen. This is only used for gen2/3
// for servo tuning.
size_t gc_heap::get_total_servo_alloc (int gen_number)
{
    size_t total_alloc = 0;
    bool loh_p = (gen_number == (max_generation + 1));

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        generation* gen = hp->generation_of (gen_number);
        total_alloc += generation_free_list_allocated (gen);
        total_alloc += generation_end_seg_allocated (gen);
        total_alloc += generation_condemned_allocated (gen);
        total_alloc += generation_sweep_allocated (gen);
    }

    return total_alloc;
}

size_t gc_heap::get_total_bgc_promoted()
{
    size_t total_bgc_promoted = 0;
#ifdef MULTIPLE_HEAPS
    int num_heaps = gc_heap::n_heaps;
#else //MULTIPLE_HEAPS
    int num_heaps = 1;
#endif //MULTIPLE_HEAPS

    for (int i = 0; i < num_heaps; i++)
    {
        total_bgc_promoted += bpromoted_bytes (i);
    }
    return total_bgc_promoted;
}

// This is called after compute_new_dynamic_data is called, at which point
// dd_current_size is calculated.
size_t gc_heap::get_total_surv_size (int gen_number)
{
    size_t total_surv_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        total_surv_size += dd_current_size (hp->dynamic_data_of (gen_number));
    }
    return total_surv_size;
}

size_t gc_heap::get_total_begin_data_size (int gen_number)
{
    size_t total_begin_data_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        total_begin_data_size += dd_begin_data_size (hp->dynamic_data_of (gen_number));
    }
    return total_begin_data_size;
}

size_t gc_heap::get_total_generation_fl_size (int gen_number)
{
    size_t total_generation_fl_size = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        total_generation_fl_size += generation_free_list_space (hp->generation_of (gen_number));
    }
    return total_generation_fl_size;
}

size_t gc_heap::get_current_gc_index (int gen_number)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
    return dd_collection_count (hp->dynamic_data_of (gen_number));
#else
    return dd_collection_count (dynamic_data_of (gen_number));
#endif //MULTIPLE_HEAPS
}
#endif //BGC_SERVO_TUNING

size_t gc_heap::current_generation_size (int gen_number)
{
    dynamic_data* dd = dynamic_data_of (gen_number);
    size_t gen_size = (dd_current_size (dd) + dd_desired_allocation (dd)
                        - dd_new_allocation (dd));

    return gen_size;
}

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6326) // "Potential comparison of a constant with another constant" is intentional in this function.
#endif //_PREFAST_

/*
    This is called when we are actually doing a GC, or when we are just checking whether
    we would do a full blocking GC, in which case check_only_p is TRUE.

    The difference between calling this with check_only_p TRUE and FALSE is that when it's
    TRUE:
            settings.reason is ignored
            budgets are not checked (since they are checked before this is called)
            it doesn't change anything non-local like generation_skip_ratio
*/
int gc_heap::generation_to_condemn (int n_initial,
                                    BOOL* blocking_collection_p,
                                    BOOL* elevation_requested_p,
                                    BOOL check_only_p)
{
    gc_mechanisms temp_settings = settings;
    gen_to_condemn_tuning temp_condemn_reasons;
    gc_mechanisms* local_settings = (check_only_p ? &temp_settings : &settings);
    gen_to_condemn_tuning* local_condemn_reasons = (check_only_p ? &temp_condemn_reasons : &gen_to_condemn_reasons);
    if (!check_only_p)
    {
        if ((local_settings->reason == reason_oos_soh) || (local_settings->reason == reason_oos_loh))
        {
            assert (n_initial >= 1);
        }

        assert (settings.reason != reason_empty);
    }

    local_condemn_reasons->init();

    int n = n_initial;
    int n_alloc = n;
    if (heap_number == 0)
    {
        dprintf (GTC_LOG, ("init: %d(%d)", n_initial, settings.reason));
    }
    int i = 0;
    int temp_gen = 0;
    BOOL low_memory_detected = g_low_memory_status;
    uint32_t memory_load = 0;
    uint64_t available_physical = 0;
    uint64_t available_page_file = 0;
    BOOL check_memory = FALSE;
    BOOL high_fragmentation  = FALSE;
    BOOL v_high_memory_load  = FALSE;
    BOOL high_memory_load    = FALSE;
    BOOL low_ephemeral_space = FALSE;
    BOOL evaluate_elevation  = TRUE;
    *elevation_requested_p   = FALSE;
    *blocking_collection_p   = FALSE;

    BOOL check_max_gen_alloc = TRUE;

#ifdef STRESS_HEAP
    int orig_gen = n;
#endif //STRESS_HEAP

    if (!check_only_p)
    {
        dd_fragmentation (dynamic_data_of (0)) =
            generation_free_list_space (youngest_generation) +
            generation_free_obj_space (youngest_generation);

        dd_fragmentation (dynamic_data_of (max_generation + 1)) =
            generation_free_list_space (large_object_generation) +
            generation_free_obj_space (large_object_generation);

        //save new_allocation
        for (i = 0; i <= max_generation + 1; i++)
        {
            dynamic_data* dd = dynamic_data_of (i);
            dprintf (GTC_LOG, ("h%d: g%d: l: %Id (%Id)",
                heap_number, i,
                dd_new_allocation (dd),
                dd_desired_allocation (dd)));
            dd_gc_new_allocation (dd) = dd_new_allocation (dd);
        }

        local_condemn_reasons->set_gen (gen_initial, n);
        temp_gen = n;

#ifdef BACKGROUND_GC
        if (recursive_gc_sync::background_running_p()
#ifdef BGC_SERVO_TUNING
            || bgc_tuning::fl_tuning_triggered
            || (bgc_tuning::enable_fl_tuning && bgc_tuning::use_stepping_trigger_p)
#endif //BGC_SERVO_TUNING
            )
        {
            check_max_gen_alloc = FALSE;
        }
#endif //BACKGROUND_GC

        if (check_max_gen_alloc)
        {
            //figure out if large objects need to be collected.
            if (get_new_allocation (max_generation+1) <= 0)
            {
                n = max_generation;
                local_condemn_reasons->set_gen (gen_alloc_budget, n);
                dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen%d b: %Id",
                         (max_generation + 1),
                         get_new_allocation (max_generation+1)));
            }
        }

        //figure out which generation ran out of allocation
        for (i = n+1; i <= (check_max_gen_alloc ? max_generation : (max_generation - 1)); i++)
        {
            if (get_new_allocation (i) <= 0)
            {
                n = i;
                if (n == max_generation)
                {
                    dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on gen2 b: %Id",
                            get_new_allocation (max_generation)));
                }
            }
            else
                break;
        }
    }

    if (n > temp_gen)
    {
        local_condemn_reasons->set_gen (gen_alloc_budget, n);
    }

    dprintf (GTC_LOG, ("h%d: g%d budget", heap_number, ((get_new_allocation (max_generation+1) <= 0) ? 3 : n)));

    n_alloc = n;

#if defined(BACKGROUND_GC) && !defined(MULTIPLE_HEAPS)
    //time based tuning
    // If enough time has elapsed since the last GC and the number of GCs
    // is too low (1/10 of the lower gen), then collect.
    // This should also be enabled if we have memory concerns.
    int n_time_max = max_generation;

    if (!check_only_p)
    {
        if (!check_max_gen_alloc)
        {
            n_time_max = max_generation - 1;
        }
    }

    if ((local_settings->pause_mode == pause_interactive) ||
        (local_settings->pause_mode == pause_sustained_low_latency))
    {
        dynamic_data* dd0 = dynamic_data_of (0);
        size_t now = GetHighPrecisionTimeStamp();
        temp_gen = n;
        for (i = (temp_gen+1); i <= n_time_max; i++)
        {
            dynamic_data* dd = dynamic_data_of (i);
            if ((now > dd_time_clock(dd) + dd_time_clock_interval(dd)) &&
                (dd_gc_clock (dd0) > (dd_gc_clock (dd) + dd_gc_clock_interval(dd))) &&
                ((n < max_generation) || ((dd_current_size (dd) < dd_max_size (dd0)))))
            {
                n = min (i, n_time_max);
                dprintf (GTC_LOG, ("time %d", n));
            }
        }
        if (n > temp_gen)
        {
            local_condemn_reasons->set_gen (gen_time_tuning, n);
            if (n == max_generation)
            {
                dprintf (BGC_TUNING_LOG, ("BTL[GTC]: trigger based on time"));
            }
        }
    }

    if (n != n_alloc)
    {
        dprintf (GTC_LOG, ("Condemning %d based on time tuning and fragmentation", n));
    }
#endif //BACKGROUND_GC && !MULTIPLE_HEAPS

    if (n < (max_generation - 1))
    {
        if (dt_low_card_table_efficiency_p (tuning_deciding_condemned_gen))
        {
            n = max (n, max_generation - 1);
            local_settings->promotion = TRUE;
            dprintf (GTC_LOG, ("h%d: skip %d, c %d",
                        heap_number, generation_skip_ratio, n));
            local_condemn_reasons->set_condition (gen_low_card_p);
        }
    }

    if (!check_only_p)
    {
        generation_skip_ratio = 100;
    }

    if (dt_low_ephemeral_space_p (check_only_p ?
                                  tuning_deciding_full_gc :
                                  tuning_deciding_condemned_gen))
    {
        low_ephemeral_space = TRUE;

        n = max (n, max_generation - 1);
        local_condemn_reasons->set_condition (gen_low_ephemeral_p);
        dprintf (GTC_LOG, ("h%d: low eph", heap_number));

        if (!provisional_mode_triggered)
        {
#ifdef BACKGROUND_GC
            if (!gc_can_use_concurrent || (generation_free_list_space (generation_of (max_generation)) == 0))
#endif //BACKGROUND_GC
            {
                //It is better to defragment first if we are running out of space for
                //the ephemeral generation but have enough fragmentation in the non-ephemeral
                //generation to make up for it. Essentially we are trading a gen2 GC for not
                //having to expand the heap during ephemeral collections.
                if (dt_high_frag_p (tuning_deciding_condemned_gen,
                                    max_generation - 1,
                                    TRUE))
                {
                    high_fragmentation = TRUE;
                    local_condemn_reasons->set_condition (gen_max_high_frag_e_p);
                    dprintf (GTC_LOG, ("heap%d: gen1 frag", heap_number));
                }
            }
        }
    }

    //figure out which ephemeral generation is too fragmented
    temp_gen = n;
    for (i = n+1; i < max_generation; i++)
    {
        if (dt_high_frag_p (tuning_deciding_condemned_gen, i))
        {
            dprintf (GTC_LOG, ("h%d g%d too frag", heap_number, i));
            n = i;
        }
        else
            break;
    }

    if (low_ephemeral_space)
    {
        //enable promotion
        local_settings->promotion = TRUE;
    }

    if (n > temp_gen)
    {
        local_condemn_reasons->set_condition (gen_eph_high_frag_p);
    }

    if (!check_only_p)
    {
        if (settings.pause_mode == pause_low_latency)
        {
            if (!is_induced (settings.reason))
            {
                n = min (n, max_generation - 1);
                dprintf (GTC_LOG, ("low latency mode is enabled, condemning %d", n));
                evaluate_elevation = FALSE;
                goto exit;
            }
        }
    }

    // It's hard to catch the point where the memory load becomes so high that we get an
    // induced GC from the finalizer thread, so we check the memory load for every gen0 GC.
    check_memory = (check_only_p ?
                    (n >= 0) :
                    ((n >= 1) || low_memory_detected));

    if (check_memory)
    {
        //find out if we are short on memory
        get_memory_info (&memory_load, &available_physical, &available_page_file);
        if (heap_number == 0)
        {
            dprintf (GTC_LOG, ("ml: %d", memory_load));
        }

        // Need to get it early enough for all heaps to use.
        local_settings->entry_available_physical_mem = available_physical;
        local_settings->entry_memory_load = memory_load;

        // @TODO: Force compaction more often under GCSTRESS
        if (memory_load >= high_memory_load_th || low_memory_detected)
        {
#ifdef SIMPLE_DPRINTF
            // stress log can't handle any parameter that's bigger than a void*.
            if (heap_number == 0)
            {
                dprintf (GTC_LOG, ("tp: %I64d, ap: %I64d", total_physical_mem, available_physical));
            }
#endif //SIMPLE_DPRINTF

            high_memory_load = TRUE;

            if (memory_load >= v_high_memory_load_th || low_memory_detected)
            {
                // TODO: Perhaps in 64-bit we should be estimating gen1's fragmentation as well since
                // gen1/gen0 may take a lot more memory than gen2.
                if (!high_fragmentation)
                {
                    high_fragmentation = dt_estimate_reclaim_space_p (tuning_deciding_condemned_gen, max_generation);
                }
                v_high_memory_load = TRUE;
            }
            else
            {
                if (!high_fragmentation)
                {
                    high_fragmentation = dt_estimate_high_frag_p (tuning_deciding_condemned_gen, max_generation, available_physical);
                }
            }

            if (high_fragmentation)
            {
                if (high_memory_load)
                {
                    local_condemn_reasons->set_condition (gen_max_high_frag_m_p);
                }
                else if (v_high_memory_load)
                {
                    local_condemn_reasons->set_condition (gen_max_high_frag_vm_p);
                }
            }
        }
    }

    dprintf (GTC_LOG, ("h%d: le: %d, hm: %d, vm: %d, f: %d",
                 heap_number, low_ephemeral_space, high_memory_load, v_high_memory_load,
                 high_fragmentation));

    if (should_expand_in_full_gc)
    {
        dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number));
        *blocking_collection_p = TRUE;
        evaluate_elevation = FALSE;
        n = max_generation;
        local_condemn_reasons->set_condition (gen_expand_fullgc_p);
    }

    if (last_gc_before_oom)
    {
        dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number));
        n = max_generation;
        *blocking_collection_p = TRUE;

        if ((local_settings->reason == reason_oos_loh) ||
            (local_settings->reason == reason_alloc_loh))
        {
            evaluate_elevation = FALSE;
        }

        local_condemn_reasons->set_condition (gen_before_oom);
    }

    if (!check_only_p)
    {
        if (is_induced_blocking (settings.reason) &&
            n_initial == max_generation
            IN_STRESS_HEAP( && !settings.stress_induced ))
        {
            if (heap_number == 0)
            {
                dprintf (GTC_LOG, ("induced - BLOCK"));
            }

            *blocking_collection_p = TRUE;
            local_condemn_reasons->set_condition (gen_induced_fullgc_p);
            evaluate_elevation = FALSE;
        }

        if (settings.reason == reason_induced_noforce)
        {
            local_condemn_reasons->set_condition (gen_induced_noforce_p);
            evaluate_elevation = FALSE;
        }
    }

    if (!provisional_mode_triggered && evaluate_elevation && (low_ephemeral_space || high_memory_load || v_high_memory_load))
    {
        *elevation_requested_p = TRUE;
#ifdef BIT64
        // if we are in high memory load and have consumed 10% of the gen2 budget, do a gen2 now.
        if (high_memory_load || v_high_memory_load)
        {
            dynamic_data* dd_max = dynamic_data_of (max_generation);
            if (((float)dd_new_allocation (dd_max) / (float)dd_desired_allocation (dd_max)) < 0.9)
            {
                dprintf (GTC_LOG, ("%Id left in gen2 alloc (%Id)",
                    dd_new_allocation (dd_max), dd_desired_allocation (dd_max)));
                n = max_generation;
                local_condemn_reasons->set_condition (gen_almost_max_alloc);
            }
        }
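        // For example (hypothetical numbers): with a gen2 desired allocation of 100MB and
        // 85MB of it still unallocated, the ratio is 0.85 < 0.9, i.e. more than 10% of the
        // budget has been consumed, so we escalate to a gen2 collection here.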

        if (n <= max_generation)
        {
#endif // BIT64
            if (high_fragmentation)
            {
                //elevate to max_generation
                n = max_generation;
                dprintf (GTC_LOG, ("h%d: f full", heap_number));

#ifdef BACKGROUND_GC
                if (high_memory_load || v_high_memory_load)
                {
                    // For background GC we want to do blocking collections more eagerly because we don't
                    // want to get into the situation where the memory load becomes high while we are in
                    // a background GC and we'd have to wait for the background GC to finish to start
                    // a blocking collection (right now the implementation doesn't handle converting
                    // a background GC to a blocking collection midway).
                    dprintf (GTC_LOG, ("h%d: bgc - BLOCK", heap_number));
                    *blocking_collection_p = TRUE;
                }
#else
                if (v_high_memory_load)
                {
                    dprintf (GTC_LOG, ("h%d: - BLOCK", heap_number));
                    *blocking_collection_p = TRUE;
                }
#endif //BACKGROUND_GC
            }
            else
            {
                n = max (n, max_generation - 1);
                dprintf (GTC_LOG, ("h%d: nf c %d", heap_number, n));
            }
#ifdef BIT64
        }
#endif // BIT64
    }

    if (!provisional_mode_triggered && (n == (max_generation - 1)) && (n_alloc < (max_generation -1)))
    {
#ifdef BGC_SERVO_TUNING
        if (!bgc_tuning::enable_fl_tuning)
#endif //BGC_SERVO_TUNING
        {
            dprintf (GTC_LOG, ("h%d: budget %d, check 2",
                        heap_number, n_alloc));
            if (get_new_allocation (max_generation) <= 0)
            {
                dprintf (GTC_LOG, ("h%d: budget alloc", heap_number));
                n = max_generation;
                local_condemn_reasons->set_condition (gen_max_gen1);
            }
        }
    }

    //figure out if max_generation is too fragmented -> blocking collection
    if (!provisional_mode_triggered
#ifdef BGC_SERVO_TUNING
        && !bgc_tuning::enable_fl_tuning
#endif //BGC_SERVO_TUNING
        && (n == max_generation))
    {
        if (dt_high_frag_p (tuning_deciding_condemned_gen, n))
        {
            dprintf (GTC_LOG, ("h%d: g%d too frag", heap_number, n));
            local_condemn_reasons->set_condition (gen_max_high_frag_p);
            if (local_settings->pause_mode != pause_sustained_low_latency)
            {
                *blocking_collection_p = TRUE;
            }
        }
    }

#ifdef BACKGROUND_GC
    if ((n == max_generation) && !(*blocking_collection_p))
    {
        if (heap_number == 0)
        {
            BOOL bgc_heap_too_small = TRUE;
            size_t gen2size = 0;
            size_t gen3size = 0;
#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                if (((g_heaps[i]->current_generation_size (max_generation)) > bgc_min_per_heap) ||
                    ((g_heaps[i]->current_generation_size (max_generation + 1)) > bgc_min_per_heap))
                {
                    bgc_heap_too_small = FALSE;
                    break;
                }
            }
#else //MULTIPLE_HEAPS
            if ((current_generation_size (max_generation) > bgc_min_per_heap) ||
                (current_generation_size (max_generation + 1) > bgc_min_per_heap))
            {
                bgc_heap_too_small = FALSE;
            }
#endif //MULTIPLE_HEAPS

            if (bgc_heap_too_small)
            {
                dprintf (GTC_LOG, ("gen2 and gen3 too small"));

#ifdef STRESS_HEAP
                // do not turn stress-induced collections into blocking GCs
                if (!settings.stress_induced)
#endif //STRESS_HEAP
                {
                    *blocking_collection_p = TRUE;
                }

                local_condemn_reasons->set_condition (gen_gen2_too_small);
            }
        }
    }
#endif //BACKGROUND_GC

exit:
    if (!check_only_p)
    {
#ifdef STRESS_HEAP
#ifdef BACKGROUND_GC
        // We can only do Concurrent GC Stress if the caller did not explicitly ask for all
        // generations to be collected.

        if (orig_gen != max_generation &&
            g_pConfig->GetGCStressLevel() && gc_can_use_concurrent)
        {
            *elevation_requested_p = FALSE;
        }
#endif //BACKGROUND_GC
#endif //STRESS_HEAP

        if (check_memory)
        {
            fgm_result.available_pagefile_mb = (size_t)(available_page_file / (1024 * 1024));
        }

        local_condemn_reasons->set_gen (gen_final_per_heap, n);
        get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons);

#ifdef DT_LOG
        local_condemn_reasons->print (heap_number);
#endif //DT_LOG

        if ((local_settings->reason == reason_oos_soh) ||
            (local_settings->reason == reason_oos_loh))
        {
            assert (n >= 1);
        }
    }

    return n;
}

#ifdef _PREFAST_
#pragma warning(pop)
#endif //_PREFAST_

inline
size_t gc_heap::min_reclaim_fragmentation_threshold (uint32_t num_heaps)
{
    // if the memory load is higher, the threshold we'd want to collect gets lower.
    size_t min_mem_based_on_available =
        (500 - (settings.entry_memory_load - high_memory_load_th) * 40) * 1024 * 1024 / num_heaps;

    size_t ten_percent_size = (size_t)((float)generation_size (max_generation) * 0.10);
    uint64_t three_percent_mem = mem_one_percent * 3 / num_heaps;

#ifdef SIMPLE_DPRINTF
    dprintf (GTC_LOG, ("min av: %Id, 10%% gen2: %Id, 3%% mem: %I64d",
        min_mem_based_on_available, ten_percent_size, three_percent_mem));
#endif //SIMPLE_DPRINTF
    return (size_t)(min (min_mem_based_on_available, min (ten_percent_size, three_percent_mem)));
}
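// Worked example for the threshold above (hypothetical numbers): with entry_memory_load = 95,
// high_memory_load_th = 90 and 4 heaps, the memory-load based term is (500 - 5*40)MB / 4 = 75MB
// per heap; the final threshold is the smallest of that, 10% of this heap's gen2 size, and
// 3% of physical memory divided by the heap count.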

inline
uint64_t gc_heap::min_high_fragmentation_threshold(uint64_t available_mem, uint32_t num_heaps)
{
    return min (available_mem, (256*1024*1024)) / num_heaps;
}
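// For example (hypothetical numbers): with 4GB of available memory and 8 heaps, this caps
// the threshold at min (4GB, 256MB) / 8 = 32MB per heap.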

enum {
CORINFO_EXCEPTION_GC = 0xE0004743 // 'GC'
};


#ifdef BACKGROUND_GC
void gc_heap::init_background_gc ()
{
    //reset the allocation so the foreground GC can allocate into the older (max_generation) generation
    generation* gen = generation_of (max_generation);
    generation_allocation_pointer (gen)= 0;
    generation_allocation_limit (gen) = 0;
    generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);

    //reset the plan allocation for each segment
    for (heap_segment* seg = generation_allocation_segment (gen); seg != ephemeral_heap_segment;
        seg = heap_segment_next_rw (seg))
    {
        heap_segment_plan_allocated (seg) = heap_segment_allocated (seg);
    }

    if (heap_number == 0)
    {
        dprintf (2, ("heap%d: bgc lowest: %Ix, highest: %Ix",
            heap_number,
            background_saved_lowest_address,
            background_saved_highest_address));
    }

    gc_lh_block_event.Reset();
}

#endif //BACKGROUND_GC

inline
void fire_drain_mark_list_event (size_t mark_list_objects)
{
    FIRE_EVENT(BGCDrainMark, mark_list_objects);
}

inline
void fire_revisit_event (size_t dirtied_pages,
                         size_t marked_objects,
                         BOOL large_objects_p)
{
    FIRE_EVENT(BGCRevisit, dirtied_pages, marked_objects, large_objects_p);
}

inline
void fire_overflow_event (uint8_t* overflow_min,
                          uint8_t* overflow_max,
                          size_t marked_objects,
                          int large_objects_p)
{
    FIRE_EVENT(BGCOverflow, (uint64_t)overflow_min, (uint64_t)overflow_max, marked_objects, large_objects_p);
}

void gc_heap::concurrent_print_time_delta (const char* msg)
{
    UNREFERENCED_PARAMETER(msg);
#ifdef TRACE_GC
    size_t current_time = GetHighPrecisionTimeStamp();
    size_t elapsed_time = current_time - time_bgc_last;
    time_bgc_last = current_time;

    dprintf (2, ("h%d: %s T %Id ms", heap_number, msg, elapsed_time));
#endif //TRACE_GC
}

void gc_heap::free_list_info (int gen_num, const char* msg)
{
    UNREFERENCED_PARAMETER(gen_num);
    UNREFERENCED_PARAMETER(msg);
#if defined (BACKGROUND_GC) && defined (TRACE_GC)
    dprintf (3, ("h%d: %s", heap_number, msg));
    for (int i = 0; i <= (max_generation + 1); i++)
    {
        generation* gen = generation_of (i);
        if ((generation_allocation_size (gen) == 0) &&
            (generation_free_list_space (gen) == 0) &&
            (generation_free_obj_space (gen) == 0))
        {
            // don't print if everything is 0.
        }
        else
        {
            dprintf (3, ("h%d: g%d: a-%Id, fl-%Id, fo-%Id",
                heap_number, i,
                generation_allocation_size (gen),
                generation_free_list_space (gen),
                generation_free_obj_space (gen)));
        }
    }
#endif // BACKGROUND_GC && TRACE_GC
}

void gc_heap::update_collection_counts_for_no_gc()
{
    assert (settings.pause_mode == pause_no_gc);

    settings.condemned_generation = max_generation;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
        g_heaps[i]->update_collection_counts();
#else //MULTIPLE_HEAPS
    update_collection_counts();
#endif //MULTIPLE_HEAPS

    full_gc_counts[gc_type_blocking]++;
}

BOOL gc_heap::should_proceed_with_gc()
{
    if (gc_heap::settings.pause_mode == pause_no_gc)
    {
        if (current_no_gc_region_info.started)
        {
            // The no_gc mode was already in progress yet we triggered another GC;
            // this effectively exits the no_gc mode.
            restore_data_for_no_gc();
        }
        else
            return should_proceed_for_no_gc();
    }

    return TRUE;
}

//internal part of gc used by the serial and concurrent version
void gc_heap::gc1()
{
#ifdef BACKGROUND_GC
    assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC

#ifdef TIME_GC
    mark_time = plan_time = reloc_time = compact_time = sweep_time = 0;
#endif //TIME_GC

    verify_soh_segment_list();

    int n = settings.condemned_generation;

    if (settings.reason == reason_pm_full_gc)
    {
        assert (n == max_generation);
        init_records();

        gen_to_condemn_tuning* local_condemn_reasons = &(get_gc_data_per_heap()->gen_to_condemn_reasons);
        local_condemn_reasons->init();
        local_condemn_reasons->set_gen (gen_initial, n);
        local_condemn_reasons->set_gen (gen_final_per_heap, n);
    }

    update_collection_counts ();

#ifdef BACKGROUND_GC
    bgc_alloc_lock->check();
#endif //BACKGROUND_GC

    free_list_info (max_generation, "beginning");

    vm_heap->GcCondemnedGeneration = settings.condemned_generation;

    assert (g_gc_card_table == card_table);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    assert (g_gc_card_bundle_table == card_bundle_table);
#endif

    {
        if (n == max_generation)
        {
            gc_low = lowest_address;
            gc_high = highest_address;
        }
        else
        {
            gc_low = generation_allocation_start (generation_of (n));
            gc_high = heap_segment_reserved (ephemeral_heap_segment);
        }
#ifdef BACKGROUND_GC
        if (settings.concurrent)
        {
#ifdef TRACE_GC
            time_bgc_last = GetHighPrecisionTimeStamp();
#endif //TRACE_GC

            FIRE_EVENT(BGCBegin);

            concurrent_print_time_delta ("BGC");

//#ifdef WRITE_WATCH
            //reset_write_watch (FALSE);
//#endif //WRITE_WATCH

            concurrent_print_time_delta ("RW");
            background_mark_phase();
            free_list_info (max_generation, "after mark phase");

            background_sweep();
            free_list_info (max_generation, "after sweep phase");
        }
        else
#endif //BACKGROUND_GC
        {
            mark_phase (n, FALSE);

            GCScan::GcRuntimeStructuresValid (FALSE);
            plan_phase (n);
            GCScan::GcRuntimeStructuresValid (TRUE);
        }
    }

    size_t end_gc_time = GetHighPrecisionTimeStamp();
//    printf ("generation: %d, elapsed time: %Id\n", n,  end_gc_time - dd_time_clock (dynamic_data_of (0)));

    //adjust the allocation size from the pinned quantities.
    for (int gen_number = 0; gen_number <= min (max_generation,n+1); gen_number++)
    {
        generation* gn = generation_of (gen_number);
        if (settings.compaction)
        {
            generation_pinned_allocated (gn) += generation_pinned_allocation_compact_size (gn);
            generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_compact_size (gn);
        }
        else
        {
            generation_pinned_allocated (gn) += generation_pinned_allocation_sweep_size (gn);
            generation_allocation_size (generation_of (gen_number)) += generation_pinned_allocation_sweep_size (gn);
        }
        generation_pinned_allocation_sweep_size (gn) = 0;
        generation_pinned_allocation_compact_size (gn) = 0;
    }

#ifdef BACKGROUND_GC
    if (settings.concurrent)
    {
        dynamic_data* dd = dynamic_data_of (n);
        dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);

#ifdef HEAP_BALANCE_INSTRUMENTATION
        if (heap_number == 0)
        {
            last_gc_end_time_ms = end_gc_time;
            dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-BGC]", settings.gc_index, dd_gc_elapsed_time (dd)));
        }
#endif //HEAP_BALANCE_INSTRUMENTATION

        free_list_info (max_generation, "after computing new dynamic data");

        gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();

        for (int gen_number = 0; gen_number < max_generation; gen_number++)
        {
            dprintf (2, ("end of BGC: gen%d new_alloc: %Id",
                         gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
            current_gc_data_per_heap->gen_data[gen_number].size_after = generation_size (gen_number);
            current_gc_data_per_heap->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
            current_gc_data_per_heap->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
        }
    }
    else
#endif //BACKGROUND_GC
    {
        free_list_info (max_generation, "end");
        for (int gen_number = 0; gen_number <= n; gen_number++)
        {
            dynamic_data* dd = dynamic_data_of (gen_number);
            dd_gc_elapsed_time (dd) = end_gc_time - dd_time_clock (dd);
            compute_new_dynamic_data (gen_number);
        }

        if (n != max_generation)
        {
            int gen_num_for_data = ((n < (max_generation - 1)) ? (n + 1) : (max_generation + 1));
            for (int gen_number = (n + 1); gen_number <= gen_num_for_data; gen_number++)
            {
                get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number);
                get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
                get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
            }
        }

        get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (uint32_t)(generation_allocator_efficiency (generation_of (max_generation)) * 100);

        free_list_info (max_generation, "after computing new dynamic data");

        if (heap_number == 0)
        {
            size_t gc_elapsed_time = dd_gc_elapsed_time (dynamic_data_of (0));
#ifdef HEAP_BALANCE_INSTRUMENTATION
            last_gc_end_time_ms = end_gc_time;
            dprintf (HEAP_BALANCE_LOG, ("[GC#%Id-%Id-%Id]", settings.gc_index, gc_elapsed_time, dd_time_clock (dynamic_data_of (0))));
#endif //HEAP_BALANCE_INSTRUMENTATION

            dprintf (GTC_LOG, ("GC#%d(gen%d) took %Idms",
                dd_collection_count (dynamic_data_of (0)),
                settings.condemned_generation,
                gc_elapsed_time));
        }

        for (int gen_number = 0; gen_number <= (max_generation + 1); gen_number++)
        {
            dprintf (2, ("end of FGC/NGC: gen%d new_alloc: %Id",
                         gen_number, dd_desired_allocation (dynamic_data_of (gen_number))));
        }
    }

    if (n < max_generation)
    {
        compute_promoted_allocation (1 + n);

        dynamic_data* dd = dynamic_data_of (1 + n);
        size_t new_fragmentation = generation_free_list_space (generation_of (1 + n)) +
                                   generation_free_obj_space (generation_of (1 + n));

#ifdef BACKGROUND_GC
        if (current_c_gc_state != c_gc_state_planning)
#endif //BACKGROUND_GC
        {
            if (settings.promotion)
            {
                dd_fragmentation (dd) = new_fragmentation;
            }
            else
            {
                //assert (dd_fragmentation (dd) == new_fragmentation);
            }
        }
    }

#ifdef BACKGROUND_GC
    if (!settings.concurrent)
#endif //BACKGROUND_GC
    {
#ifndef FEATURE_REDHAWK
        // GCToEEInterface::IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR.
        assert(GCToEEInterface::IsGCThread());
#endif // FEATURE_REDHAWK
        adjust_ephemeral_limits();
    }

#ifdef BACKGROUND_GC
    assert (ephemeral_low == generation_allocation_start (generation_of ( max_generation -1)));
    assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
#endif //BACKGROUND_GC

    if (fgn_maxgen_percent)
    {
        if (settings.condemned_generation == (max_generation - 1))
        {
            check_for_full_gc (max_generation - 1, 0);
        }
        else if (settings.condemned_generation == max_generation)
        {
            if (full_gc_approach_event_set
#ifdef MULTIPLE_HEAPS
                && (heap_number == 0)
#endif //MULTIPLE_HEAPS
                )
            {
                dprintf (2, ("FGN-GC: setting gen2 end event"));

                full_gc_approach_event.Reset();
#ifdef BACKGROUND_GC
                // By definition WaitForFullGCComplete only succeeds if it's a full, *blocking* GC; otherwise we need to return N/A
                fgn_last_gc_was_concurrent = settings.concurrent ? TRUE : FALSE;
#endif //BACKGROUND_GC
                full_gc_end_event.Set();
                full_gc_approach_event_set = false;
            }
        }
    }

#ifdef BACKGROUND_GC
    if (!settings.concurrent)
#endif //BACKGROUND_GC
    {
        //decide on the next allocation quantum
        if (alloc_contexts_used >= 1)
        {
            allocation_quantum = Align (min ((size_t)CLR_SIZE,
                                            (size_t)max (1024, get_new_allocation (0) / (2 * alloc_contexts_used))),
                                            get_alignment_constant(FALSE));
            dprintf (3, ("New allocation quantum: %d(0x%Ix)", allocation_quantum, allocation_quantum));
        }
    }
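    // For example (hypothetical numbers): with 8MB of gen0 budget remaining and 16 active
    // allocation contexts, the computed quantum is min (CLR_SIZE, max (1024, 8MB / 32)) =
    // min (CLR_SIZE, 256KB), which is then aligned.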

    descr_generations (FALSE);

    verify_soh_segment_list();

#ifdef BACKGROUND_GC
    add_to_history_per_heap();
    if (heap_number == 0)
    {
        add_to_history();
    }
#endif // BACKGROUND_GC

#ifdef GC_STATS
    if (GCStatistics::Enabled() && heap_number == 0)
        g_GCStatistics.AddGCStats(settings,
            dd_gc_elapsed_time(dynamic_data_of(settings.condemned_generation)));
#endif // GC_STATS

#ifdef TIME_GC
    fprintf (stdout, "%d,%d,%d,%d,%d,%d\n",
             n, mark_time, plan_time, reloc_time, compact_time, sweep_time);
#endif //TIME_GC

#ifdef BACKGROUND_GC
    assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#endif //BACKGROUND_GC

#if defined(VERIFY_HEAP) || (defined (FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))
    if (FALSE
#ifdef VERIFY_HEAP
        // Note that right now GCConfig::GetHeapVerifyLevel always returns the same
        // value. If we ever allow randomly adjusting this as the process runs,
        // we cannot call it this way as joins need to match - we must have the same
        // value for all heaps like we do with bgc_heap_walk_for_etw_p.
        || (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
#endif
#if defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC)
        || (bgc_heap_walk_for_etw_p && settings.concurrent)
#endif
        )
    {
#ifdef BACKGROUND_GC
        bool cooperative_mode = true;

        if (settings.concurrent)
        {
            cooperative_mode = enable_preemptive ();

#ifdef MULTIPLE_HEAPS
            bgc_t_join.join(this, gc_join_suspend_ee_verify);
            if (bgc_t_join.joined())
            {
                bgc_threads_sync_event.Reset();

                dprintf(2, ("Joining BGC threads to suspend EE for verify heap"));
                bgc_t_join.restart();
            }
            if (heap_number == 0)
            {
                suspend_EE();
                bgc_threads_sync_event.Set();
            }
            else
            {
                bgc_threads_sync_event.Wait(INFINITE, FALSE);
                dprintf (2, ("bgc_threads_sync_event is signalled"));
            }
#else
            suspend_EE();
#endif //MULTIPLE_HEAPS

            //fix the allocation area so verify_heap can proceed.
            fix_allocation_contexts (FALSE);
        }
#endif //BACKGROUND_GC

#ifdef BACKGROUND_GC
        assert (settings.concurrent == (uint32_t)(bgc_thread_id.IsCurrentThread()));
#ifdef FEATURE_EVENT_TRACE
        if (bgc_heap_walk_for_etw_p && settings.concurrent)
        {
            GCToEEInterface::DiagWalkBGCSurvivors(__this);

#ifdef MULTIPLE_HEAPS
            bgc_t_join.join(this, gc_join_after_profiler_heap_walk);
            if (bgc_t_join.joined())
            {
                bgc_t_join.restart();
            }
#endif // MULTIPLE_HEAPS
        }
#endif // FEATURE_EVENT_TRACE
#endif //BACKGROUND_GC

#ifdef VERIFY_HEAP
        if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
            verify_heap (FALSE);
#endif // VERIFY_HEAP

#ifdef BACKGROUND_GC
        if (settings.concurrent)
        {
            repair_allocation_contexts (TRUE);

#ifdef MULTIPLE_HEAPS
            bgc_t_join.join(this, gc_join_restart_ee_verify);
            if (bgc_t_join.joined())
            {
                bgc_threads_sync_event.Reset();

                dprintf(2, ("Joining BGC threads to restart EE after verify heap"));
                bgc_t_join.restart();
            }
            if (heap_number == 0)
            {
                restart_EE();
                bgc_threads_sync_event.Set();
            }
            else
            {
                bgc_threads_sync_event.Wait(INFINITE, FALSE);
                dprintf (2, ("bgc_threads_sync_event is signalled"));
            }
#else
            restart_EE();
#endif //MULTIPLE_HEAPS

            disable_preemptive (cooperative_mode);
        }
#endif //BACKGROUND_GC
    }
#endif // defined(VERIFY_HEAP) || (defined(FEATURE_EVENT_TRACE) && defined(BACKGROUND_GC))

#ifdef MULTIPLE_HEAPS
    if (!settings.concurrent)
    {
        gc_t_join.join(this, gc_join_done);
        if (gc_t_join.joined ())
        {
            gc_heap::internal_gc_done = false;

            //equalize the new desired size of the generations
            int limit = settings.condemned_generation;
            if (limit == max_generation)
            {
                limit = max_generation+1;
            }
            for (int gen = 0; gen <= limit; gen++)
            {
                size_t total_desired = 0;

                for (int i = 0; i < gc_heap::n_heaps; i++)
                {
                    gc_heap* hp = gc_heap::g_heaps[i];
                    dynamic_data* dd = hp->dynamic_data_of (gen);
                    size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
                    if (temp_total_desired < total_desired)
                    {
                        // we overflowed.
                        total_desired = (size_t)MAX_PTR;
                        break;
                    }
                    total_desired = temp_total_desired;
                }

                size_t desired_per_heap = Align (total_desired/gc_heap::n_heaps,
                                                    get_alignment_constant ((gen != (max_generation+1))));

                if (gen == 0)
                {
#if 1 //subsumed by the linear allocation model
                    // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
                    // apply some smoothing.
                    size_t smoothing = 3; // exponential smoothing factor
                    smoothed_desired_per_heap = desired_per_heap / smoothing + ((smoothed_desired_per_heap / smoothing) * (smoothing-1));
                    dprintf (HEAP_BALANCE_LOG, ("TEMPsn = %Id  n = %Id", smoothed_desired_per_heap, desired_per_heap));
                    desired_per_heap = Align(smoothed_desired_per_heap, get_alignment_constant (true));
#endif //1
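                    // For example (hypothetical numbers): with smoothing = 3, a previous
                    // smoothed_desired_per_heap of 60MB and a new desired_per_heap of 90MB,
                    // the new smoothed value is 90/3 + (60/3)*2 = 30 + 40 = 70MB.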

                    if (!heap_hard_limit)
                    {
                        // if desired_per_heap is close to min_gc_size, trim it
                        // down to min_gc_size to stay in the cache
                        gc_heap* hp = gc_heap::g_heaps[0];
                        dynamic_data* dd = hp->dynamic_data_of (gen);
                        size_t min_gc_size = dd_min_size(dd);
                        // if the min GC size is larger than the true on-die cache size,
                        // then don't bother limiting the desired size
                        if ((min_gc_size <= GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)) &&
                            desired_per_heap <= 2*min_gc_size)
                        {
                            desired_per_heap = min_gc_size;
                        }
                    }
#ifdef BIT64
                    desired_per_heap = joined_youngest_desired (desired_per_heap);
                    dprintf (2, ("final gen0 new_alloc: %Id", desired_per_heap));
#endif // BIT64
                    gc_data_global.final_youngest_desired = desired_per_heap;
                }
#if 1 //subsumed by the linear allocation model
                if (gen == (max_generation + 1))
                {
                    // to avoid spikes in mem usage due to short-term fluctuations in survivorship,
                    // apply some smoothing.
                    static size_t smoothed_desired_per_heap_loh = 0;
                    size_t smoothing = 3; // exponential smoothing factor
                    size_t loh_count = dd_collection_count (dynamic_data_of (max_generation));
                    if (smoothing  > loh_count)
                        smoothing  = loh_count;
                    smoothed_desired_per_heap_loh = desired_per_heap / smoothing + ((smoothed_desired_per_heap_loh / smoothing) * (smoothing-1));
                    dprintf (2, ("smoothed_desired_per_heap_loh  = %Id  desired_per_heap = %Id", smoothed_desired_per_heap_loh, desired_per_heap));
                    desired_per_heap = Align(smoothed_desired_per_heap_loh, get_alignment_constant (false));
                }
#endif //1
                for (int i = 0; i < gc_heap::n_heaps; i++)
                {
                    gc_heap* hp = gc_heap::g_heaps[i];
                    dynamic_data* dd = hp->dynamic_data_of (gen);
                    dd_desired_allocation (dd) = desired_per_heap;
                    dd_gc_new_allocation (dd) = desired_per_heap;
                    dd_new_allocation (dd) = desired_per_heap;

                    if (gen == 0)
                    {
                        hp->fgn_last_alloc = desired_per_heap;
                    }
                }
            }

#ifdef FEATURE_LOH_COMPACTION
            BOOL all_heaps_compacted_p = TRUE;
#endif //FEATURE_LOH_COMPACTION
            int max_gen0_must_clear_bricks = 0;
            for (int i = 0; i < gc_heap::n_heaps; i++)
            {
                gc_heap* hp = gc_heap::g_heaps[i];
                hp->decommit_ephemeral_segment_pages();
                hp->rearrange_large_heap_segments();
#ifdef FEATURE_LOH_COMPACTION
                all_heaps_compacted_p &= hp->loh_compacted_p;
#endif //FEATURE_LOH_COMPACTION
                // compute max of gen0_must_clear_bricks over all heaps
                max_gen0_must_clear_bricks = max(max_gen0_must_clear_bricks, hp->gen0_must_clear_bricks);
            }

#ifdef FEATURE_LOH_COMPACTION
            check_loh_compact_mode (all_heaps_compacted_p);
#endif //FEATURE_LOH_COMPACTION

            // if max_gen0_must_clear_bricks > 0, distribute to all heaps -
            // if one heap encountered an interior pointer during this GC,
            // the next GC might see one on another heap
            if (max_gen0_must_clear_bricks > 0)
            {
                for (int i = 0; i < gc_heap::n_heaps; i++)
                {
                    gc_heap* hp = gc_heap::g_heaps[i];
                    hp->gen0_must_clear_bricks = max_gen0_must_clear_bricks;
                }
            }

            fire_pevents();
            pm_full_gc_init_or_clear();

            gc_t_join.restart();
        }
        alloc_context_count = 0;
        heap_select::mark_heap (heap_number);
    }

#else
    gc_data_global.final_youngest_desired =
        dd_desired_allocation (dynamic_data_of (0));

    check_loh_compact_mode (loh_compacted_p);

    decommit_ephemeral_segment_pages();
    fire_pevents();

    if (!(settings.concurrent))
    {
        rearrange_large_heap_segments();
        do_post_gc();
    }

    pm_full_gc_init_or_clear();

#ifdef BACKGROUND_GC
    recover_bgc_settings();
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS
}

void gc_heap::save_data_for_no_gc()
{
    current_no_gc_region_info.saved_pause_mode = settings.pause_mode;
#ifdef MULTIPLE_HEAPS
    // This is to affect heap balancing.
    for (int i = 0; i < n_heaps; i++)
    {
        current_no_gc_region_info.saved_gen0_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (0));
        dd_min_size (g_heaps[i]->dynamic_data_of (0)) = min_balance_threshold;
        current_no_gc_region_info.saved_gen3_min_size = dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1));
        dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = 0;
    }
#endif //MULTIPLE_HEAPS
}

void gc_heap::restore_data_for_no_gc()
{
    gc_heap::settings.pause_mode = current_no_gc_region_info.saved_pause_mode;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        dd_min_size (g_heaps[i]->dynamic_data_of (0)) = current_no_gc_region_info.saved_gen0_min_size;
        dd_min_size (g_heaps[i]->dynamic_data_of (max_generation + 1)) = current_no_gc_region_info.saved_gen3_min_size;
    }
#endif //MULTIPLE_HEAPS
}

start_no_gc_region_status gc_heap::prepare_for_no_gc_region (uint64_t total_size,
                                                             BOOL loh_size_known,
                                                             uint64_t loh_size,
                                                             BOOL disallow_full_blocking)
{
    if (current_no_gc_region_info.started)
    {
        return start_no_gc_in_progress;
    }

    start_no_gc_region_status status = start_no_gc_success;

    save_data_for_no_gc();
    settings.pause_mode = pause_no_gc;
    current_no_gc_region_info.start_status = start_no_gc_success;

    uint64_t allocation_no_gc_loh = 0;
    uint64_t allocation_no_gc_soh = 0;
    assert(total_size != 0);
    if (loh_size_known)
    {
        assert(loh_size != 0);
        assert(loh_size <= total_size);
        allocation_no_gc_loh = loh_size;
        allocation_no_gc_soh = total_size - loh_size;
    }
    else
    {
        allocation_no_gc_soh = total_size;
        allocation_no_gc_loh = total_size;
    }

    int soh_align_const = get_alignment_constant (TRUE);
    size_t max_soh_allocated = soh_segment_size - segment_info_size - eph_gen_starts_size;
    size_t size_per_heap = 0;
    const double scale_factor = 1.05;

    int num_heaps = 1;
#ifdef MULTIPLE_HEAPS
    num_heaps = n_heaps;
#endif // MULTIPLE_HEAPS

    uint64_t total_allowed_soh_allocation = (uint64_t)max_soh_allocated * num_heaps;
    // [LOCALGC TODO]
    // In theory, the upper limit here is the physical memory of the machine, not
    // SIZE_T_MAX. This is not true today because total_physical_mem can be
    // larger than SIZE_T_MAX if running in wow64 on a machine with more than
    // 4GB of RAM. Once Local GC code divergence is resolved and code is flowing
    // more freely between branches, it would be good to clean this up to use
    // total_physical_mem instead of SIZE_T_MAX.
    assert(total_allowed_soh_allocation <= SIZE_T_MAX);
    uint64_t total_allowed_loh_allocation = SIZE_T_MAX;
    uint64_t total_allowed_soh_alloc_scaled = allocation_no_gc_soh > 0 ? static_cast<uint64_t>(total_allowed_soh_allocation / scale_factor) : 0;
    uint64_t total_allowed_loh_alloc_scaled = allocation_no_gc_loh > 0 ? static_cast<uint64_t>(total_allowed_loh_allocation / scale_factor) : 0;

    if (allocation_no_gc_soh > total_allowed_soh_alloc_scaled ||
        allocation_no_gc_loh > total_allowed_loh_alloc_scaled)
    {
        status = start_no_gc_too_large;
        goto done;
    }

    if (allocation_no_gc_soh > 0)
    {
        allocation_no_gc_soh = static_cast<uint64_t>(allocation_no_gc_soh * scale_factor);
        allocation_no_gc_soh = min (allocation_no_gc_soh, total_allowed_soh_alloc_scaled);
    }

    if (allocation_no_gc_loh > 0)
    {
        allocation_no_gc_loh = static_cast<uint64_t>(allocation_no_gc_loh * scale_factor);
        allocation_no_gc_loh = min (allocation_no_gc_loh, total_allowed_loh_alloc_scaled);
    }

    if (disallow_full_blocking)
        current_no_gc_region_info.minimal_gc_p = TRUE;

    if (allocation_no_gc_soh != 0)
    {
        current_no_gc_region_info.soh_allocation_size = (size_t)allocation_no_gc_soh;
        size_per_heap = current_no_gc_region_info.soh_allocation_size;
#ifdef MULTIPLE_HEAPS
        size_per_heap /= n_heaps;
        for (int i = 0; i < n_heaps; i++)
        {
            // due to heap balancing we need to allow some room before we even look to balance to another heap.
            g_heaps[i]->soh_allocation_no_gc = min (Align ((size_per_heap + min_balance_threshold), soh_align_const), max_soh_allocated);
        }
#else //MULTIPLE_HEAPS
        soh_allocation_no_gc = min (Align (size_per_heap, soh_align_const), max_soh_allocated);
#endif //MULTIPLE_HEAPS
    }

    if (allocation_no_gc_loh != 0)
    {
        current_no_gc_region_info.loh_allocation_size = (size_t)allocation_no_gc_loh;
        size_per_heap = current_no_gc_region_info.loh_allocation_size;
#ifdef MULTIPLE_HEAPS
        size_per_heap /= n_heaps;
        for (int i = 0; i < n_heaps; i++)
            g_heaps[i]->loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
#else //MULTIPLE_HEAPS
        loh_allocation_no_gc = Align (size_per_heap, get_alignment_constant (FALSE));
#endif //MULTIPLE_HEAPS
    }

done:
    if (status != start_no_gc_success)
        restore_data_for_no_gc();
    return status;
}
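// For example (hypothetical numbers): a no-GC-region request for 100MB with no LOH size
// specified treats the full 100MB as both the SOH and the LOH budget; each budget is then
// padded by the 1.05 scale factor (to 105MB), capped at the allowed per-kind maximum, and
// divided across the heaps.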

void gc_heap::handle_failure_for_no_gc()
{
    gc_heap::restore_data_for_no_gc();
    // sets current_no_gc_region_info.started to FALSE here.
    memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
}

start_no_gc_region_status gc_heap::get_start_no_gc_region_status()
{
    return current_no_gc_region_info.start_status;
}

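// If a no-GC region is in effect, count this GC (and whether it was induced) so that
// end_no_gc_region can report the right status.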
void gc_heap::record_gcs_during_no_gc()
{
    if (current_no_gc_region_info.started)
    {
        current_no_gc_region_info.num_gcs++;
        if (is_induced (settings.reason))
            current_no_gc_region_info.num_gcs_induced++;
    }
}

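// Walks the LOH free lists looking for a free item large enough to satisfy
// loh_allocation_no_gc; returns TRUE if one is found.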
BOOL gc_heap::find_loh_free_for_no_gc()
{
    allocator* loh_allocator = generation_allocator (generation_of (max_generation + 1));
    size_t sz_list = loh_allocator->first_bucket_size();
    size_t size = loh_allocation_no_gc;
    for (unsigned int a_l_idx = 0; a_l_idx < loh_allocator->number_of_buckets(); a_l_idx++)
    {
        if ((size < sz_list) || (a_l_idx == (loh_allocator->number_of_buckets()-1)))
        {
            uint8_t* free_list = loh_allocator->alloc_list_head_of (a_l_idx);
            while (free_list)
            {
                size_t free_list_size = unused_array_size(free_list);

                if (free_list_size > loh_allocation_no_gc)
                {
                    dprintf (3, ("free item %Ix(%Id) for no gc", (size_t)free_list, free_list_size));
                    return TRUE;
                }

                free_list = free_list_slot (free_list);
            }
        }
        sz_list = sz_list * 2;
    }

    return FALSE;
}

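// Finds space for the LOH portion of a no-GC region: first the free lists, then unused
// reserved space at the end of existing LOH segments; for a minimal GC region we try to
// acquire a new segment right away.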
BOOL gc_heap::find_loh_space_for_no_gc()
{
    saved_loh_segment_no_gc = 0;

    if (find_loh_free_for_no_gc())
        return TRUE;

    heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));

    while (seg)
    {
        size_t remaining = heap_segment_reserved (seg) - heap_segment_allocated (seg);
        if (remaining >= loh_allocation_no_gc)
        {
            saved_loh_segment_no_gc = seg;
            break;
        }
        seg = heap_segment_next (seg);
    }

    if (!saved_loh_segment_no_gc && current_no_gc_region_info.minimal_gc_p)
    {
        // If no full GC is allowed, we try to get a new seg right away.
        saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc)
#ifdef MULTIPLE_HEAPS
                                                      , this
#endif //MULTIPLE_HEAPS
                                                      );
    }

    return (saved_loh_segment_no_gc != 0);
}

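// Returns TRUE if saved_loh_segment_no_gc is a newly acquired segment that has not been
// threaded into the LOH segment list yet.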
BOOL gc_heap::loh_allocated_for_no_gc()
{
    if (!saved_loh_segment_no_gc)
        return FALSE;

    heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
    do
    {
        if (seg == saved_loh_segment_no_gc)
        {
            return FALSE;
        }
        seg = heap_segment_next (seg);
    } while (seg);

    return TRUE;
}

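// Commits enough memory on seg to cover loh_allocation_no_gc past the current allocated pointer.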
BOOL gc_heap::commit_loh_for_no_gc (heap_segment* seg)
{
    uint8_t* end_committed = heap_segment_allocated (seg) + loh_allocation_no_gc;
    assert (end_committed <= heap_segment_reserved (seg));
    return (grow_heap_segment (seg, end_committed));
}

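// Threads any LOH segments newly acquired for the no-GC region into the LOH segment list.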
void gc_heap::thread_no_gc_loh_segments()
{
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap* hp = g_heaps[i];
        if (hp->loh_allocated_for_no_gc())
        {
            hp->thread_loh_segment (hp->saved_loh_segment_no_gc);
            hp->saved_loh_segment_no_gc = 0;
        }
    }
#else //MULTIPLE_HEAPS
    if (loh_allocated_for_no_gc())
    {
        thread_loh_segment (saved_loh_segment_no_gc);
        saved_loh_segment_no_gc = 0;
    }
#endif //MULTIPLE_HEAPS
}

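// Sets the LOH budget to the amount reserved for the no-GC region so allocations within
// the region don't trigger a GC.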
void gc_heap::set_loh_allocations_for_no_gc()
{
    if (current_no_gc_region_info.loh_allocation_size != 0)
    {
        dynamic_data* dd = dynamic_data_of (max_generation + 1);
        dd_new_allocation (dd) = loh_allocation_no_gc;
        dd_gc_new_allocation (dd) = dd_new_allocation (dd);
    }
}

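// Same as set_loh_allocations_for_no_gc, but for the gen0 budget.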
void gc_heap::set_soh_allocations_for_no_gc()
{
    if (current_no_gc_region_info.soh_allocation_size != 0)
    {
        dynamic_data* dd = dynamic_data_of (0);
        dd_new_allocation (dd) = soh_allocation_no_gc;
        dd_gc_new_allocation (dd) = dd_new_allocation (dd);
#ifdef MULTIPLE_HEAPS
        alloc_context_count = 0;
#endif //MULTIPLE_HEAPS
    }
}

void gc_heap::set_allocations_for_no_gc()
{
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap* hp = g_heaps[i];
        hp->set_loh_allocations_for_no_gc();
        hp->set_soh_allocations_for_no_gc();
    }
#else //MULTIPLE_HEAPS
    set_loh_allocations_for_no_gc();
    set_soh_allocations_for_no_gc();
#endif //MULTIPLE_HEAPS
}

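// Decides whether a GC is needed to satisfy the requested no-GC region. Returns TRUE if we
// should proceed with the GC; returns FALSE if the region can be started without one (or if
// starting has already failed), in which case the start attempt is concluded here.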
BOOL gc_heap::should_proceed_for_no_gc()
{
    BOOL gc_requested = FALSE;
    BOOL loh_full_gc_requested = FALSE;
    BOOL soh_full_gc_requested = FALSE;
    BOOL no_gc_requested = FALSE;
    BOOL get_new_loh_segments = FALSE;

    if (current_no_gc_region_info.soh_allocation_size)
    {
#ifdef MULTIPLE_HEAPS
        for (int i = 0; i < n_heaps; i++)
        {
            gc_heap* hp = g_heaps[i];
            if ((size_t)(heap_segment_reserved (hp->ephemeral_heap_segment) - hp->alloc_allocated) < hp->soh_allocation_no_gc)
            {
                gc_requested = TRUE;
                break;
            }
        }
#else //MULTIPLE_HEAPS
        if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated) < soh_allocation_no_gc)
            gc_requested = TRUE;
#endif //MULTIPLE_HEAPS

        if (!gc_requested)
        {
#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                gc_heap* hp = g_heaps[i];
                if (!(hp->grow_heap_segment (hp->ephemeral_heap_segment, (hp->alloc_allocated + hp->soh_allocation_no_gc))))
                {
                    soh_full_gc_requested = TRUE;
                    break;
                }
            }
#else //MULTIPLE_HEAPS
            if (!grow_heap_segment (ephemeral_heap_segment, (alloc_allocated + soh_allocation_no_gc)))
                soh_full_gc_requested = TRUE;
#endif //MULTIPLE_HEAPS
        }
    }

    if (!current_no_gc_region_info.minimal_gc_p && gc_requested)
    {
        soh_full_gc_requested = TRUE;
    }

    no_gc_requested = !(soh_full_gc_requested || gc_requested);

    if (soh_full_gc_requested && current_no_gc_region_info.minimal_gc_p)
    {
        current_no_gc_region_info.start_status = start_no_gc_no_memory;
        goto done;
    }

    if (!soh_full_gc_requested && current_no_gc_region_info.loh_allocation_size)
    {
        // Check to see if we have enough reserved space.
#ifdef MULTIPLE_HEAPS
        for (int i = 0; i < n_heaps; i++)
        {
            gc_heap* hp = g_heaps[i];
            if (!hp->find_loh_space_for_no_gc())
            {
                loh_full_gc_requested = TRUE;
                break;
            }
        }
#else //MULTIPLE_HEAPS
        if (!find_loh_space_for_no_gc())
            loh_full_gc_requested = TRUE;
#endif //MULTIPLE_HEAPS

        // Check to see if we have committed space.
        if (!loh_full_gc_requested)
        {
#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                gc_heap* hp = g_heaps[i];
                if (hp->saved_loh_segment_no_gc && !hp->commit_loh_for_no_gc (hp->saved_loh_segment_no_gc))
                {
                    loh_full_gc_requested = TRUE;
                    break;
                }
            }
#else //MULTIPLE_HEAPS
            if (saved_loh_segment_no_gc && !commit_loh_for_no_gc (saved_loh_segment_no_gc))
                loh_full_gc_requested = TRUE;
#endif //MULTIPLE_HEAPS
        }
    }

    if (loh_full_gc_requested || soh_full_gc_requested)
    {
        if (current_no_gc_region_info.minimal_gc_p)
            current_no_gc_region_info.start_status = start_no_gc_no_memory;
    }

    no_gc_requested = !(loh_full_gc_requested || soh_full_gc_requested || gc_requested);

    if (current_no_gc_region_info.start_status == start_no_gc_success)
    {
        if (no_gc_requested)
            set_allocations_for_no_gc();
    }

done:

    if ((current_no_gc_region_info.start_status == start_no_gc_success) && !no_gc_requested)
        return TRUE;
    else
    {
        // We are done with starting the no_gc_region.
        current_no_gc_region_info.started = TRUE;
        return FALSE;
    }
}

end_no_gc_region_status gc_heap::end_no_gc_region()
{
    dprintf (1, ("end no gc called"));

    end_no_gc_region_status status = end_no_gc_success;

    if (!(current_no_gc_region_info.started))
        status = end_no_gc_not_in_progress;
    if (current_no_gc_region_info.num_gcs_induced)
        status = end_no_gc_induced;
    else if (current_no_gc_region_info.num_gcs)
        status = end_no_gc_alloc_exceeded;

    if (settings.pause_mode == pause_no_gc)
        restore_data_for_no_gc();

    // sets current_no_gc_region_info.started to FALSE here.
    memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));

    return status;
}

//update counters
void gc_heap::update_collection_counts ()
{
    dynamic_data* dd0 = dynamic_data_of (0);
    dd_gc_clock (dd0) += 1;

    size_t now = GetHighPrecisionTimeStamp();

    for (int i = 0; i <= settings.condemned_generation;i++)
    {
        dynamic_data* dd = dynamic_data_of (i);
        dd_collection_count (dd)++;
        //this is needed by the linear allocation model
        if (i == max_generation)
            dd_collection_count (dynamic_data_of (max_generation+1))++;
        dd_gc_clock (dd) = dd_gc_clock (dd0);
        dd_time_clock (dd) = now;
    }
}

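// Satisfies the SOH portion of a no-GC region without an actual collection: if the ephemeral
// segment doesn't have enough room, get a new segment, promote the current ephemeral
// generations in place (they become gen2) and rebuild the ephemeral generations at the start
// of the new segment.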
BOOL gc_heap::expand_soh_with_minimal_gc()
{
    if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) >= soh_allocation_no_gc)
        return TRUE;

    heap_segment* new_seg = soh_get_segment_to_expand();
    if (new_seg)
    {
        if (g_gc_card_table != card_table)
            copy_brick_card_table();

        settings.promotion = TRUE;
        settings.demotion = FALSE;
        ephemeral_promotion = TRUE;
        int condemned_gen_number = max_generation - 1;

        generation* gen = 0;
        int align_const = get_alignment_constant (TRUE);

        for (int i = 0; i <= condemned_gen_number; i++)
        {
            gen = generation_of (i);
            saved_ephemeral_plan_start[i] = generation_allocation_start (gen);
            saved_ephemeral_plan_start_size[i] = Align (size (generation_allocation_start (gen)), align_const);
        }

        // We do need to clear the bricks here as we are converting a bunch of ephemeral objects to gen2
        // and need to make sure that there are no left over bricks from the previous GCs for the space
        // we just used for gen0 allocation. We will need to go through the bricks for these objects for
        // ephemeral GCs later.
        for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
             b < brick_of (align_on_brick (heap_segment_allocated (ephemeral_heap_segment)));
             b++)
        {
            set_brick (b, -1);
        }

        size_t ephemeral_size = (heap_segment_allocated (ephemeral_heap_segment) -
                                generation_allocation_start (generation_of (max_generation - 1)));
        heap_segment_next (ephemeral_heap_segment) = new_seg;
        ephemeral_heap_segment = new_seg;
        uint8_t*  start = heap_segment_mem (ephemeral_heap_segment);

        for (int i = condemned_gen_number; i >= 0; i--)
        {
            gen = generation_of (i);
            size_t gen_start_size = Align (min_obj_size);
            make_generation (generation_table[i], ephemeral_heap_segment, start, 0);
            generation_plan_allocation_start (gen) = start;
            generation_plan_allocation_start_size (gen) = gen_start_size;
            start += gen_start_size;
        }
        heap_segment_used (ephemeral_heap_segment) = start - plug_skew;
        heap_segment_plan_allocated (ephemeral_heap_segment) = start;

        fix_generation_bounds (condemned_gen_number, generation_of (0));

        dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size;
        dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation));

        adjust_ephemeral_limits();
        return TRUE;
    }
    else
        return FALSE;
}

// Only to be done on the thread that calls restart in a join for server GC;
// it also resets the per-heap OOM status.
void gc_heap::check_and_set_no_gc_oom()
{
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap* hp = g_heaps[i];
        if (hp->no_gc_oom_p)
        {
            current_no_gc_region_info.start_status = start_no_gc_no_memory;
            hp->no_gc_oom_p = false;
        }
    }
#else
    if (no_gc_oom_p)
    {
        current_no_gc_region_info.start_status = start_no_gc_no_memory;
        no_gc_oom_p = false;
    }
#endif //MULTIPLE_HEAPS
}

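// Called after the GC performed for a no-GC region: commit the SOH space and find/commit
// (or acquire) the LOH space that was requested; if everything succeeds, set the budgets
// and mark the region as started.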
void gc_heap::allocate_for_no_gc_after_gc()
{
    if (current_no_gc_region_info.minimal_gc_p)
        repair_allocation_contexts (TRUE);

    no_gc_oom_p = false;

    if (current_no_gc_region_info.start_status != start_no_gc_no_memory)
    {
        if (current_no_gc_region_info.soh_allocation_size != 0)
        {
            if (((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment)) < soh_allocation_no_gc) ||
                (!grow_heap_segment (ephemeral_heap_segment, (heap_segment_allocated (ephemeral_heap_segment) + soh_allocation_no_gc))))
            {
                no_gc_oom_p = true;
            }

#ifdef MULTIPLE_HEAPS
            gc_t_join.join(this, gc_join_after_commit_soh_no_gc);
            if (gc_t_join.joined())
            {
#endif //MULTIPLE_HEAPS

                check_and_set_no_gc_oom();

#ifdef MULTIPLE_HEAPS
                gc_t_join.restart();
            }
#endif //MULTIPLE_HEAPS
        }

        if ((current_no_gc_region_info.start_status == start_no_gc_success) &&
            !(current_no_gc_region_info.minimal_gc_p) &&
            (current_no_gc_region_info.loh_allocation_size != 0))
        {
            gc_policy = policy_compact;
            saved_loh_segment_no_gc = 0;

            if (!find_loh_free_for_no_gc())
            {
                heap_segment* seg = generation_allocation_segment (generation_of (max_generation + 1));
                BOOL found_seg_p = FALSE;
                while (seg)
                {
                    if ((size_t)(heap_segment_reserved (seg) - heap_segment_allocated (seg)) >= loh_allocation_no_gc)
                    {
                        found_seg_p = TRUE;
                        if (!commit_loh_for_no_gc (seg))
                        {
                            no_gc_oom_p = true;
                            break;
                        }
                    }
                    seg = heap_segment_next (seg);
                }

                if (!found_seg_p)
                    gc_policy = policy_expand;
            }

#ifdef MULTIPLE_HEAPS
            gc_t_join.join(this, gc_join_expand_loh_no_gc);
            if (gc_t_join.joined())
            {
                check_and_set_no_gc_oom();

                if (current_no_gc_region_info.start_status == start_no_gc_success)
                {
                    for (int i = 0; i < n_heaps; i++)
                    {
                        gc_heap* hp = g_heaps[i];
                        if (hp->gc_policy == policy_expand)
                        {
                            hp->saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc), hp);
                            if (!(hp->saved_loh_segment_no_gc))
                            {
                                current_no_gc_region_info.start_status = start_no_gc_no_memory;
                                break;
                            }
                        }
                    }
                }

                gc_t_join.restart();
            }
#else //MULTIPLE_HEAPS
            check_and_set_no_gc_oom();

            if ((current_no_gc_region_info.start_status == start_no_gc_success) && (gc_policy == policy_expand))
            {
                saved_loh_segment_no_gc = get_segment_for_loh (get_large_seg_size (loh_allocation_no_gc));
                if (!saved_loh_segment_no_gc)
                    current_no_gc_region_info.start_status = start_no_gc_no_memory;
            }
#endif //MULTIPLE_HEAPS

            if ((current_no_gc_region_info.start_status == start_no_gc_success) && saved_loh_segment_no_gc)
            {
                if (!commit_loh_for_no_gc (saved_loh_segment_no_gc))
                {
                    no_gc_oom_p = true;
                }
            }
        }
    }

#ifdef MULTIPLE_HEAPS
    gc_t_join.join(this, gc_join_final_no_gc);
    if (gc_t_join.joined())
    {
#endif //MULTIPLE_HEAPS

        check_and_set_no_gc_oom();

        if (current_no_gc_region_info.start_status == start_no_gc_success)
        {
            set_allocations_for_no_gc();
            current_no_gc_region_info.started = TRUE;
        }

#ifdef MULTIPLE_HEAPS
        gc_t_join.restart();
    }
#endif //MULTIPLE_HEAPS
}

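// Resets the per-GC bookkeeping (gc_data_per_heap, fgm_result, etc.) and records each
// generation's size and free space before this GC.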
void gc_heap::init_records()
{
    // An option is to move this to after we figure out which gen to condemn, so we don't
    // need to clear the data for generations we know won't change; but that also means
    // we can't simply call memset here.
    memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
    gc_data_per_heap.heap_index = heap_number;
    if (heap_number == 0)
        memset (&gc_data_global, 0, sizeof (gc_data_global));

#ifdef GC_CONFIG_DRIVEN
    memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc));
#endif //GC_CONFIG_DRIVEN
    memset (&fgm_result, 0, sizeof (fgm_result));

    for (int i = 0; i <= (max_generation + 1); i++)
    {
        gc_data_per_heap.gen_data[i].size_before = generation_size (i);
        generation* gen = generation_of (i);
        gc_data_per_heap.gen_data[i].free_list_space_before = generation_free_list_space (gen);
        gc_data_per_heap.gen_data[i].free_obj_space_before = generation_free_obj_space (gen);
    }

    sufficient_gen0_space_p = FALSE;

#ifdef MULTIPLE_HEAPS
    gen0_allocated_after_gc_p = false;
#endif //MULTIPLE_HEAPS

#if defined (_DEBUG) && defined (VERIFY_HEAP)
    verify_pinned_queue_p = FALSE;
#endif // _DEBUG && VERIFY_HEAP
}

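// Provisional mode: if the gen1 GC we just did decided to trigger a full blocking GC, set up
// the settings for it here; if the GC in progress is that PM-triggered full GC, clear the trigger.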
void gc_heap::pm_full_gc_init_or_clear()
{
    // This means the next GC will be a full blocking GC and we need to init.
    if (settings.condemned_generation == (max_generation - 1))
    {
        if (pm_trigger_full_gc)
        {
#ifdef MULTIPLE_HEAPS
            do_post_gc();
#endif //MULTIPLE_HEAPS
            dprintf (GTC_LOG, ("init for PM triggered full GC"));
            uint32_t saved_entry_memory_load = settings.entry_memory_load;
            settings.init_mechanisms();
            settings.reason = reason_pm_full_gc;
            settings.condemned_generation = max_generation;
            settings.entry_memory_load = saved_entry_memory_load;
            // Can't assert this since we only check at the end of gen2 GCs;
            // during a gen1 GC the memory load could already have dropped.
            // Although arguably we should just turn off PM then...
            //assert (settings.entry_memory_load >= high_memory_load_th);
            assert (settings.entry_memory_load > 0);
            settings.gc_index += 1;
            do_pre_gc();
        }
    }
    // This means we are in the process of a full blocking GC triggered by
    // this PM mode.
    else if (settings.reason == reason_pm_full_gc)
    {
        assert (settings.condemned_generation == max_generation);
        assert (pm_trigger_full_gc);
        pm_trigger_full_gc = false;

        dprintf (GTC_LOG, ("PM triggered full GC done"));
    }
}

void gc_heap::garbage_collect_pm_full_gc()
{
    assert (settings.condemned_generation == max_generation);
    assert (settings.reason == reason_pm_full_gc);
    assert (!settings.concurrent);
    gc1();
}

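// The main blocking GC routine. n is the generation requested to be condemned; this decides
// the actual condemned generation, handles no-GC regions and (when enabled) starting a
// background GC, and otherwise performs the collection via gc1().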
void gc_heap::garbage_collect (int n)
{
    //reset the number of alloc contexts
    alloc_contexts_used = 0;

    fix_allocation_contexts (TRUE);
#ifdef MULTIPLE_HEAPS
#ifdef JOIN_STATS
    gc_t_join.start_ts(this);
#endif //JOIN_STATS
    clear_gen0_bricks();
#endif //MULTIPLE_HEAPS

    if ((settings.pause_mode == pause_no_gc) && current_no_gc_region_info.minimal_gc_p)
    {
#ifdef MULTIPLE_HEAPS
        gc_t_join.join(this, gc_join_minimal_gc);
        if (gc_t_join.joined())
        {
#endif //MULTIPLE_HEAPS

#ifdef MULTIPLE_HEAPS
            // this is serialized because we need to get a segment
            for (int i = 0; i < n_heaps; i++)
            {
                if (!(g_heaps[i]->expand_soh_with_minimal_gc()))
                    current_no_gc_region_info.start_status = start_no_gc_no_memory;
            }
#else
            if (!expand_soh_with_minimal_gc())
                current_no_gc_region_info.start_status = start_no_gc_no_memory;
#endif //MULTIPLE_HEAPS

            update_collection_counts_for_no_gc();

#ifdef MULTIPLE_HEAPS
            gc_t_join.restart();
        }
#endif //MULTIPLE_HEAPS

        goto done;
    }

    init_records();

    settings.reason = gc_trigger_reason;
#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
    num_pinned_objects = 0;
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

#ifdef STRESS_HEAP
    if (settings.reason == reason_gcstress)
    {
        settings.reason = reason_induced;
        settings.stress_induced = TRUE;
    }
#endif // STRESS_HEAP

#ifdef MULTIPLE_HEAPS
    //align all heaps on the max generation to condemn
    dprintf (3, ("Joining for max generation to condemn"));
    condemned_generation_num = generation_to_condemn (n,
                                                    &blocking_collection,
                                                    &elevation_requested,
                                                    FALSE);
    gc_t_join.join(this, gc_join_generation_determined);
    if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
#if !defined(SEG_MAPPING_TABLE) && !defined(FEATURE_BASICFREEZE)
        //delete old slots from the segment table
        seg_table->delete_old_slots();
#endif //!SEG_MAPPING_TABLE && !FEATURE_BASICFREEZE

#ifdef MULTIPLE_HEAPS
        for (int i = 0; i < n_heaps; i++)
        {
            gc_heap* hp = g_heaps[i];
            // check for card table growth
            if (g_gc_card_table != hp->card_table)
                hp->copy_brick_card_table();

            hp->rearrange_large_heap_segments();
#ifdef BACKGROUND_GC
            hp->background_delay_delete_loh_segments();
            if (!recursive_gc_sync::background_running_p())
                hp->rearrange_small_heap_segments();
#endif //BACKGROUND_GC
        }
#else //MULTIPLE_HEAPS
        if (g_gc_card_table != card_table)
            copy_brick_card_table();

        rearrange_large_heap_segments();
#ifdef BACKGROUND_GC
        background_delay_delete_loh_segments();
        if (!recursive_gc_sync::background_running_p())
            rearrange_small_heap_segments();
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS

    BOOL should_evaluate_elevation = TRUE;
    BOOL should_do_blocking_collection = FALSE;

#ifdef MULTIPLE_HEAPS
    int gen_max = condemned_generation_num;
    for (int i = 0; i < n_heaps; i++)
    {
        if (gen_max < g_heaps[i]->condemned_generation_num)
            gen_max = g_heaps[i]->condemned_generation_num;
        if (should_evaluate_elevation && !(g_heaps[i]->elevation_requested))
            should_evaluate_elevation = FALSE;
        if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection))
            should_do_blocking_collection = TRUE;
    }

    settings.condemned_generation = gen_max;
#else //MULTIPLE_HEAPS
    settings.condemned_generation = generation_to_condemn (n,
                                                        &blocking_collection,
                                                        &elevation_requested,
                                                        FALSE);
    should_evaluate_elevation = elevation_requested;
    should_do_blocking_collection = blocking_collection;
#endif //MULTIPLE_HEAPS

    settings.condemned_generation = joined_generation_to_condemn (
                                        should_evaluate_elevation,
                                        n,
                                        settings.condemned_generation,
                                        &should_do_blocking_collection
                                        STRESS_HEAP_ARG(n)
                                        );

    STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10,
            "condemned generation num: %d\n", settings.condemned_generation);

    record_gcs_during_no_gc();

    if (settings.condemned_generation > 1)
        settings.promotion = TRUE;

#ifdef HEAP_ANALYZE
    // At this point we've decided what generation is condemned
    // See if we've been requested to analyze survivors after the mark phase
    if (GCToEEInterface::AnalyzeSurvivorsRequested(settings.condemned_generation))
    {
        heap_analyze_enabled = TRUE;
    }
#endif // HEAP_ANALYZE

        GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced);

#ifdef BACKGROUND_GC
        if ((settings.condemned_generation == max_generation) &&
            (recursive_gc_sync::background_running_p()))
        {
            //TODO BACKGROUND_GC If we just wait for the end of the GC, it won't work
            // because we have to collect gen0 and gen1 properly;
            // in particular, the allocation contexts are gone.
            // For now, it is simpler to collect max_generation-1.
            settings.condemned_generation = max_generation - 1;
            dprintf (GTC_LOG, ("bgc - 1 instead of 2"));
        }

        if ((settings.condemned_generation == max_generation) &&
            (should_do_blocking_collection == FALSE) &&
            gc_can_use_concurrent &&
            !temp_disable_concurrent_p &&
            ((settings.pause_mode == pause_interactive) || (settings.pause_mode == pause_sustained_low_latency)))
        {
            keep_bgc_threads_p = TRUE;
            c_write (settings.concurrent,  TRUE);
        }
#endif //BACKGROUND_GC

        settings.gc_index = (uint32_t)dd_collection_count (dynamic_data_of (0)) + 1;

#ifdef MULTIPLE_HEAPS
        hb_log_balance_activities();
        hb_log_new_allocation();
#endif //MULTIPLE_HEAPS

        // Call the EE for start of GC work
        // just one thread for MP GC
        GCToEEInterface::GcStartWork (settings.condemned_generation,
                                max_generation);

        // TODO: we could fire an ETW event here to say this GC is a concurrent GC, but later on (e.g. if we
        // cannot create threads) it could end up being a non concurrent GC. Maybe for concurrent GC we should
        // fire it in do_background_gc, and if it failed to be a CGC we fire it in gc1... in other words, this
        // should be fired in gc1.
        do_pre_gc();

#ifdef MULTIPLE_HEAPS
        gc_start_event.Reset();
        //start all threads on the roots.
        dprintf(3, ("Starting all gc threads for gc"));
        gc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    descr_generations (TRUE);

#ifdef VERIFY_HEAP
    if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
       !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_POST_GC_ONLY))
    {
        verify_heap (TRUE);
    }
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK)
        checkGCWriteBarrier();

#endif // VERIFY_HEAP

#ifdef BACKGROUND_GC
    if (settings.concurrent)
    {
        // We need to save the settings because we'll need to restore it after each FGC.
        assert (settings.condemned_generation == max_generation);
        settings.compaction = FALSE;
        saved_bgc_settings = settings;

#ifdef MULTIPLE_HEAPS
        if (heap_number == 0)
        {
            for (int i = 0; i < n_heaps; i++)
            {
                prepare_bgc_thread (g_heaps[i]);
            }
            dprintf (2, ("setting bgc_threads_sync_event"));
            bgc_threads_sync_event.Set();
        }
        else
        {
            bgc_threads_sync_event.Wait(INFINITE, FALSE);
            dprintf (2, ("bgc_threads_sync_event is signalled"));
        }
#else
        prepare_bgc_thread(0);
#endif //MULTIPLE_HEAPS

#ifdef MULTIPLE_HEAPS
        gc_t_join.join(this, gc_join_start_bgc);
        if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
            do_concurrent_p = TRUE;
            do_ephemeral_gc_p = FALSE;
#ifdef MULTIPLE_HEAPS
            dprintf(2, ("Joined to perform a background GC"));

            for (int i = 0; i < n_heaps; i++)
            {
                gc_heap* hp = g_heaps[i];
                if (!(hp->bgc_thread) || !hp->commit_mark_array_bgc_init (hp->mark_array))
                {
                    do_concurrent_p = FALSE;
                    break;
                }
                else
                {
                    hp->background_saved_lowest_address = hp->lowest_address;
                    hp->background_saved_highest_address = hp->highest_address;
                }
            }
#else
            do_concurrent_p = (!!bgc_thread && commit_mark_array_bgc_init (mark_array));
            if (do_concurrent_p)
            {
                background_saved_lowest_address = lowest_address;
                background_saved_highest_address = highest_address;
            }
#endif //MULTIPLE_HEAPS

            if (do_concurrent_p)
            {
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
                SoftwareWriteWatch::EnableForGCHeap();
#endif //FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#ifdef MULTIPLE_HEAPS
                for (int i = 0; i < n_heaps; i++)
                    g_heaps[i]->current_bgc_state = bgc_initialized;
#else
                current_bgc_state = bgc_initialized;
#endif //MULTIPLE_HEAPS

                int gen = check_for_ephemeral_alloc();
                // To always do a gen1 GC before we start the BGC (temporary, for testing
                // purposes), use this instead:
                //int gen = max_generation - 1;
                dont_restart_ee_p = TRUE;
                if (gen == -1)
                {
                    // If we decide to not do a GC before the BGC we need to
                    // restore the gen0 alloc context.
#ifdef MULTIPLE_HEAPS
                    for (int i = 0; i < n_heaps; i++)
                    {
                        generation_allocation_pointer (g_heaps[i]->generation_of (0)) =  0;
                        generation_allocation_limit (g_heaps[i]->generation_of (0)) = 0;
                    }
#else
                    generation_allocation_pointer (youngest_generation) =  0;
                    generation_allocation_limit (youngest_generation) = 0;
#endif //MULTIPLE_HEAPS
                }
                else
                {
                    do_ephemeral_gc_p = TRUE;

                    settings.init_mechanisms();
                    settings.condemned_generation = gen;
                    settings.gc_index = (size_t)dd_collection_count (dynamic_data_of (0)) + 2;
                    do_pre_gc();

                    // TODO BACKGROUND_GC need to add the profiling stuff here.
                    dprintf (GTC_LOG, ("doing gen%d before doing a bgc", gen));
                }

                //clear the cards so they don't bleed in gen 1 during collection
                // shouldn't this always be done at the beginning of any GC?
                //clear_card_for_addresses (
                //    generation_allocation_start (generation_of (0)),
                //    heap_segment_allocated (ephemeral_heap_segment));

                if (!do_ephemeral_gc_p)
                {
                    do_background_gc();
                }
            }
            else
            {
                settings.compaction = TRUE;
                c_write (settings.concurrent, FALSE);
            }

#ifdef MULTIPLE_HEAPS
            gc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }

        if (do_concurrent_p)
        {
            // At this point we are sure we'll be starting a BGC, so save its per heap data here.
            // global data is only calculated at the end of the GC so we don't need to worry about
            // FGCs overwriting it.
            memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
            memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));

            if (do_ephemeral_gc_p)
            {
                dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));

                gen_to_condemn_reasons.init();
                gen_to_condemn_reasons.set_condition (gen_before_bgc);
                gc_data_per_heap.gen_to_condemn_reasons.init (&gen_to_condemn_reasons);
                gc1();
#ifdef MULTIPLE_HEAPS
                gc_t_join.join(this, gc_join_bgc_after_ephemeral);
                if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
                {
#ifdef MULTIPLE_HEAPS
                    do_post_gc();
#endif //MULTIPLE_HEAPS
                    settings = saved_bgc_settings;
                    assert (settings.concurrent);

                    do_background_gc();

#ifdef MULTIPLE_HEAPS
                    gc_t_join.restart();
#endif //MULTIPLE_HEAPS
                }
            }
        }
        else
        {
            dprintf (2, ("couldn't create BGC threads, reverting to doing a blocking GC"));
            gc1();
        }
    }
    else
#endif //BACKGROUND_GC
    {
        gc1();
    }
#ifndef MULTIPLE_HEAPS
    allocation_running_time = (size_t)GCToOSInterface::GetLowPrecisionTimeStamp();
    allocation_running_amount = dd_new_allocation (dynamic_data_of (0));
    fgn_last_alloc = dd_new_allocation (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS

done:
    if (settings.pause_mode == pause_no_gc)
        allocate_for_no_gc_after_gc();

}

#define mark_stack_empty_p() (mark_stack_base == mark_stack_tos)

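// Per-heap count of bytes promoted during marking. Under server GC each heap's counter is
// spaced 16 slots apart, presumably to keep the counters on separate cache lines.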
inline
size_t& gc_heap::promoted_bytes(int thread)
{
#ifdef MULTIPLE_HEAPS
    return g_promoted [thread*16];
#else //MULTIPLE_HEAPS
    UNREFERENCED_PARAMETER(thread);
    return g_promoted;
#endif //MULTIPLE_HEAPS
}

#ifdef INTERIOR_POINTERS
heap_segment* gc_heap::find_segment (uint8_t* interior, BOOL small_segment_only_p)
{
#ifdef SEG_MAPPING_TABLE
    heap_segment* seg = seg_mapping_table_segment_of (interior);
    if (seg)
    {
        if (small_segment_only_p && heap_segment_loh_p (seg))
            return 0;
    }
    return seg;
#else //SEG_MAPPING_TABLE
    heap_segment* hs = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* h = gc_heap::g_heaps [i];
        hs = h->find_segment_per_heap (interior, small_segment_only_p);
        if (hs)
        {
            break;
        }
    }
#else
    {
        gc_heap* h = pGenGCHeap;
        hs = h->find_segment_per_heap (interior, small_segment_only_p);
    }
#endif //MULTIPLE_HEAPS
    return hs;
#endif //SEG_MAPPING_TABLE
}

heap_segment* gc_heap::find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p)
{
#ifdef SEG_MAPPING_TABLE
    return find_segment (interior, small_segment_only_p);
#else //SEG_MAPPING_TABLE
    if (in_range_for_segment (interior, ephemeral_heap_segment))
    {
        return ephemeral_heap_segment;
    }
    else
    {
        heap_segment* found_seg = 0;

        {
            heap_segment* seg = generation_start_segment (generation_of (max_generation));
            do
            {
                if (in_range_for_segment (interior, seg))
                {
                    found_seg = seg;
                    goto end_find_segment;
                }

            } while ((seg = heap_segment_next (seg)) != 0);
        }
        if (!small_segment_only_p)
        {
#ifdef BACKGROUND_GC
            {
                ptrdiff_t delta = 0;
                heap_segment* seg = segment_of (interior, delta);
                if (seg && in_range_for_segment (interior, seg))
                {
                    found_seg = seg;
                }
                goto end_find_segment;
            }
#else //BACKGROUND_GC
            heap_segment* seg = generation_start_segment (generation_of (max_generation+1));
            do
            {
                if (in_range_for_segment(interior, seg))
                {
                    found_seg = seg;
                    goto end_find_segment;
                }

            } while ((seg = heap_segment_next (seg)) != 0);
#endif //BACKGROUND_GC
        }
end_find_segment:

        return found_seg;
    }
#endif //SEG_MAPPING_TABLE
}
#endif //INTERIOR_POINTERS

#if !defined(_DEBUG) && !defined(__GNUC__)
inline // This causes link errors if global optimization is off
#endif //!_DEBUG && !__GNUC__
gc_heap* gc_heap::heap_of (uint8_t* o)
{
#ifdef MULTIPLE_HEAPS
    if (o == 0)
        return g_heaps [0];
#ifdef SEG_MAPPING_TABLE
    gc_heap* hp = seg_mapping_table_heap_of (o);
    return (hp ? hp : g_heaps[0]);
#else //SEG_MAPPING_TABLE
    ptrdiff_t delta = 0;
    heap_segment* seg = segment_of (o, delta);
    return (seg ? heap_segment_heap (seg) : g_heaps [0]);
#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
    UNREFERENCED_PARAMETER(o);
    return __this;
#endif //MULTIPLE_HEAPS
}

inline
gc_heap* gc_heap::heap_of_gc (uint8_t* o)
{
#ifdef MULTIPLE_HEAPS
    if (o == 0)
        return g_heaps [0];
#ifdef SEG_MAPPING_TABLE
    gc_heap* hp = seg_mapping_table_heap_of_gc (o);
    return (hp ? hp : g_heaps[0]);
#else //SEG_MAPPING_TABLE
    ptrdiff_t delta = 0;
    heap_segment* seg = segment_of (o, delta);
    return (seg ? heap_segment_heap (seg) : g_heaps [0]);
#endif //SEG_MAPPING_TABLE
#else //MULTIPLE_HEAPS
    UNREFERENCED_PARAMETER(o);
    return __this;
#endif //MULTIPLE_HEAPS
}

#ifdef INTERIOR_POINTERS
// will find all heap objects (large and small)
uint8_t* gc_heap::find_object (uint8_t* interior, uint8_t* low)
{
    if (!gen0_bricks_cleared)
    {
#ifdef MULTIPLE_HEAPS
        assert (!"Should have already been done in server GC");
#endif //MULTIPLE_HEAPS
        gen0_bricks_cleared = TRUE;
        //initialize brick table for gen 0
        for (size_t b = brick_of (generation_allocation_start (generation_of (0)));
             b < brick_of (align_on_brick
                           (heap_segment_allocated (ephemeral_heap_segment)));
             b++)
        {
            set_brick (b, -1);
        }
    }
    //indicate that in the future this needs to be done during allocation
    gen0_must_clear_bricks = FFIND_DECAY;

    int brick_entry = get_brick_entry(brick_of (interior));
    if (brick_entry == 0)
    {
        // this is a pointer to a large object
        heap_segment* seg = find_segment_per_heap (interior, FALSE);
        if (seg
#ifdef FEATURE_CONSERVATIVE_GC
            && (GCConfig::GetConservativeGC() || interior <= heap_segment_allocated(seg))
#endif
            )
        {
            // If interior falls within the first free object at the beginning of a generation,
            // we don't have a brick entry for it, and we may incorrectly treat it as being on the large object heap.
            int align_const = get_alignment_constant (heap_segment_read_only_p (seg)
#ifdef FEATURE_CONSERVATIVE_GC
                                                       || (GCConfig::GetConservativeGC() && !heap_segment_loh_p (seg))
#endif
                                                      );
            //int align_const = get_alignment_constant (heap_segment_read_only_p (seg));
            assert (interior < heap_segment_allocated (seg));

            uint8_t* o = heap_segment_mem (seg);
            while (o < heap_segment_allocated (seg))
            {
                uint8_t* next_o = o + Align (size (o), align_const);
                assert (next_o > o);
                if ((o <= interior) && (interior < next_o))
                    return o;
                o = next_o;
            }
            return 0;
        }
        else
        {
            return 0;
        }
    }
    else if (interior >= low)
    {
        heap_segment* seg = find_segment_per_heap (interior, TRUE);
        if (seg)
        {
#ifdef FEATURE_CONSERVATIVE_GC
            if (interior >= heap_segment_allocated (seg))
                return 0;
#else
            assert (interior < heap_segment_allocated (seg));
#endif
            uint8_t* o = find_first_object (interior, heap_segment_mem (seg));
            return o;
        }
        else
            return 0;
    }
    else
        return 0;
}

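// Given an interior pointer within [low, high), returns the start of the object containing it,
// using the brick table and plug tree for small-object segments or walking the segment for
// large objects.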
uint8_t*
gc_heap::find_object_for_relocation (uint8_t* interior, uint8_t* low, uint8_t* high)
{
    uint8_t* old_address = interior;
    if (!((old_address >= low) && (old_address < high)))
        return 0;
    uint8_t* plug = 0;
    size_t  brick = brick_of (old_address);
    int    brick_entry =  brick_table [ brick ];
    if (brick_entry != 0)
    {
    retry:
        {
            while (brick_entry < 0)
            {
                brick = (brick + brick_entry);
                brick_entry =  brick_table [ brick ];
            }
            uint8_t* old_loc = old_address;
            uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
                                      old_loc);
            if (node <= old_loc)
                plug = node;
            else
            {
                brick = brick - 1;
                brick_entry =  brick_table [ brick ];
                goto retry;
            }

        }
        assert (plug);
        //find the object by going along the plug
        uint8_t* o = plug;
        while (o <= interior)
        {
            uint8_t* next_o = o + Align (size (o));
            assert (next_o > o);
            if (next_o > interior)
            {
                break;
            }
            o = next_o;
        }
        assert ((o <= interior) && ((o + Align (size (o))) > interior));
        return o;
    }
    else
    {
        // this is a pointer to a large object
        heap_segment* seg = find_segment_per_heap (interior, FALSE);
        if (seg)
        {
            assert (interior < heap_segment_allocated (seg));

            uint8_t* o = heap_segment_mem (seg);
            while (o < heap_segment_allocated (seg))
            {
                uint8_t* next_o = o + Align (size (o));
                assert (next_o > o);
                if ((o < interior) && (interior < next_o))
                    return o;
                o = next_o;
            }
            return 0;
        }
        else
        {
            return 0;
        }
    }
}
#else //INTERIOR_POINTERS
inline
uint8_t* gc_heap::find_object (uint8_t* o, uint8_t* low)
{
    return o;
}
#endif //INTERIOR_POINTERS

#ifdef MULTIPLE_HEAPS

#ifdef MARK_LIST
#ifdef GC_CONFIG_DRIVEN
#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;}}
#else //GC_CONFIG_DRIVEN
#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}}
#endif //GC_CONFIG_DRIVEN
#else //MARK_LIST
#define m_boundary(o) {}
#endif //MARK_LIST

#define m_boundary_fullgc(o) {}

#else //MULTIPLE_HEAPS

#ifdef MARK_LIST
#ifdef GC_CONFIG_DRIVEN
#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;}
#else
#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;}
#endif //GC_CONFIG_DRIVEN
#else //MARK_LIST
#define m_boundary(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
#endif //MARK_LIST

#define m_boundary_fullgc(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}

#endif //MULTIPLE_HEAPS

#define method_table(o) ((CObjectHeader*)(o))->GetMethodTable()

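// Sets the mark bit on o; returns TRUE if o was not already marked.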
inline
BOOL gc_heap::gc_mark1 (uint8_t* o)
{
    BOOL marked = !marked (o);
    set_marked (o);
    dprintf (3, ("*%Ix*, newly marked: %d", (size_t)o, marked));
    return marked;
}

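// Marks o if it falls within [low, high); under server GC, objects outside that range are
// looked up on their owning heap and marked against that heap's gc_low/gc_high.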
inline
BOOL gc_heap::gc_mark (uint8_t* o, uint8_t* low, uint8_t* high)
{
    BOOL marked = FALSE;
    if ((o >= low) && (o < high))
        marked = gc_mark1 (o);
#ifdef MULTIPLE_HEAPS
    else if (o)
    {
        //find the heap
        gc_heap* hp = heap_of_gc (o);
        assert (hp);
        if ((o >= hp->gc_low) && (o < hp->gc_high))
            marked = gc_mark1 (o);
    }
#ifdef SNOOP_STATS
    snoop_stat.objects_checked_count++;

    if (marked)
    {
        snoop_stat.objects_marked_count++;
    }
    if (!o)
    {
        snoop_stat.zero_ref_count++;
    }

#endif //SNOOP_STATS
#endif //MULTIPLE_HEAPS
    return marked;
}

#ifdef BACKGROUND_GC

inline
BOOL gc_heap::background_marked (uint8_t* o)
{
    return mark_array_marked (o);
}
inline
BOOL gc_heap::background_mark1 (uint8_t* o)
{
    BOOL to_mark = !mark_array_marked (o);

    dprintf (3, ("b*%Ix*b(%d)", (size_t)o, (to_mark ? 1 : 0)));
    if (to_mark)
    {
        mark_array_set_marked (o);
        dprintf (4, ("n*%Ix*n", (size_t)o));
        return TRUE;
    }
    else
        return FALSE;
}

// TODO: we could consider filtering out NULLs here instead of going to
// look for them on other heaps.
inline
BOOL gc_heap::background_mark (uint8_t* o, uint8_t* low, uint8_t* high)
{
    BOOL marked = FALSE;
    if ((o >= low) && (o < high))
        marked = background_mark1 (o);
#ifdef MULTIPLE_HEAPS
    else if (o)
    {
        //find the heap
        gc_heap* hp = heap_of (o);
        assert (hp);
        if ((o >= hp->background_saved_lowest_address) && (o < hp->background_saved_highest_address))
            marked = background_mark1 (o);
    }
#endif //MULTIPLE_HEAPS
    return marked;
}

#endif //BACKGROUND_GC

inline
uint8_t* gc_heap::next_end (heap_segment* seg, uint8_t* f)
{
    if (seg == ephemeral_heap_segment)
        return  f;
    else
        return  heap_segment_allocated (seg);
}

#define new_start() {if (ppstop <= start) {break;} else {parm = start}}
#define ignore_start 0
#define use_start 1

#define go_through_object(mt,o,size,parm,start,start_useful,limit,exp)      \
{                                                                           \
    CGCDesc* map = CGCDesc::GetCGCDescFromMT((MethodTable*)(mt));           \
    CGCDescSeries* cur = map->GetHighestSeries();                           \
    ptrdiff_t cnt = (ptrdiff_t) map->GetNumSeries();                        \
                                                                            \
    if (cnt >= 0)                                                           \
    {                                                                       \
        CGCDescSeries* last = map->GetLowestSeries();                       \
        uint8_t** parm = 0;                                                 \
        do                                                                  \
        {                                                                   \
            assert (parm <= (uint8_t**)((o) + cur->GetSeriesOffset()));     \
            parm = (uint8_t**)((o) + cur->GetSeriesOffset());               \
            uint8_t** ppstop =                                              \
                (uint8_t**)((uint8_t*)parm + cur->GetSeriesSize() + (size));\
            if (!start_useful || (uint8_t*)ppstop > (start))                \
            {                                                               \
                if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);\
                while (parm < ppstop)                                       \
                {                                                           \
                   {exp}                                                    \
                   parm++;                                                  \
                }                                                           \
            }                                                               \
            cur--;                                                          \
                                                                            \
        } while (cur >= last);                                              \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* Handle the repeating case - array of valuetypes */               \
        uint8_t** parm = (uint8_t**)((o) + cur->startoffset);               \
        if (start_useful && start > (uint8_t*)parm)                         \
        {                                                                   \
            ptrdiff_t cs = mt->RawGetComponentSize();                         \
            parm = (uint8_t**)((uint8_t*)parm + (((start) - (uint8_t*)parm)/cs)*cs); \
        }                                                                   \
        while ((uint8_t*)parm < ((o)+(size)-plug_skew))                     \
        {                                                                   \
            for (ptrdiff_t __i = 0; __i > cnt; __i--)                         \
            {                                                               \
                HALF_SIZE_T skip =  cur->val_serie[__i].skip;               \
                HALF_SIZE_T nptrs = cur->val_serie[__i].nptrs;              \
                uint8_t** ppstop = parm + nptrs;                            \
                if (!start_useful || (uint8_t*)ppstop > (start))            \
                {                                                           \
                    if (start_useful && (uint8_t*)parm < (start)) parm = (uint8_t**)(start);      \
                    do                                                      \
                    {                                                       \
                       {exp}                                                \
                       parm++;                                              \
                    } while (parm < ppstop);                                \
                }                                                           \
                parm = (uint8_t**)((uint8_t*)ppstop + skip);                \
            }                                                               \
        }                                                                   \
    }                                                                       \
}

#define go_through_object_nostart(mt,o,size,parm,exp) {go_through_object(mt,o,size,parm,o,ignore_start,(o + size),exp); }

// One thing to note about this macro: you can use *parm safely, but in general
// you don't want to use parm itself because for collectible types it's not an
// address on the managed heap.
#ifndef COLLECTIBLE_CLASS
#define go_through_object_cl(mt,o,size,parm,exp)                            \
{                                                                           \
    if (header(o)->ContainsPointers())                                      \
    {                                                                       \
        go_through_object_nostart(mt,o,size,parm,exp);                      \
    }                                                                       \
}
#else //COLLECTIBLE_CLASS
#define go_through_object_cl(mt,o,size,parm,exp)                            \
{                                                                           \
    if (header(o)->Collectible())                                           \
    {                                                                       \
        uint8_t* class_obj = get_class_object (o);                             \
        uint8_t** parm = &class_obj;                                           \
        do {exp} while (false);                                             \
    }                                                                       \
    if (header(o)->ContainsPointers())                                      \
    {                                                                       \
        go_through_object_nostart(mt,o,size,parm,exp);                      \
    }                                                                       \
}
#endif //COLLECTIBLE_CLASS

// This starts a plug. But mark_stack_tos isn't increased until set_pinned_info is called.
void gc_heap::enque_pinned_plug (uint8_t* plug,
                                 BOOL save_pre_plug_info_p,
                                 uint8_t* last_object_in_last_plug)
{
    if (mark_stack_array_length <= mark_stack_tos)
    {
        if (!grow_mark_stack (mark_stack_array, mark_stack_array_length, MARK_STACK_INITIAL_LENGTH))
        {
            // We don't want to continue here due to security
            // risks. This happens very rarely, and fixing it in a
            // way that lets us continue is fairly involved and will
            // not be done in Dev10.
            GCToEEInterface::HandleFatalError((unsigned int)CORINFO_EXCEPTION_GC);
        }
    }

    dprintf (3, ("enqueuing P #%Id(%Ix): %Ix. oldest: %Id, LO: %Ix, pre: %d",
        mark_stack_tos, &mark_stack_array[mark_stack_tos], plug, mark_stack_bos, last_object_in_last_plug, (save_pre_plug_info_p ? 1 : 0)));
    mark& m = mark_stack_array[mark_stack_tos];
    m.first = plug;
    // Must be set now because if we have a short object we'll need the value of saved_pre_p.
    m.saved_pre_p = save_pre_plug_info_p;

    if (save_pre_plug_info_p)
    {
#ifdef SHORT_PLUGS
        BOOL is_padded = is_plug_padded (last_object_in_last_plug);
        if (is_padded)
            clear_plug_padded (last_object_in_last_plug);
#endif //SHORT_PLUGS
        memcpy (&(m.saved_pre_plug), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));
#ifdef SHORT_PLUGS
        if (is_padded)
            set_plug_padded (last_object_in_last_plug);
#endif //SHORT_PLUGS

        memcpy (&(m.saved_pre_plug_reloc), &(((plug_and_gap*)plug)[-1]), sizeof (gap_reloc_pair));

        // If the last object in the last plug is too short, it requires special handling.
        size_t last_obj_size = plug - last_object_in_last_plug;
        if (last_obj_size < min_pre_pin_obj_size)
        {
            record_interesting_data_point (idp_pre_short);
#ifdef SHORT_PLUGS
            if (is_padded)
                record_interesting_data_point (idp_pre_short_padded);
#endif //SHORT_PLUGS
            dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!",
                         last_object_in_last_plug, plug));
            // Need to set the short bit regardless of whether the object has refs, because we need to
            // indicate that this object is not walkable.
            m.set_pre_short();

#ifdef COLLECTIBLE_CLASS
            if (is_collectible (last_object_in_last_plug))
            {
                m.set_pre_short_collectible();
            }
#endif //COLLECTIBLE_CLASS

            if (contain_pointers (last_object_in_last_plug))
            {
                dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));

                go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
                    {
                        size_t gap_offset = (((size_t)pval - (size_t)(plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
                        dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
                        m.set_pre_short_bit (gap_offset);
                    }
                );
            }
        }
    }

    m.saved_post_p = FALSE;
}

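// Saves the gap/relocation info of the non-pinned plug that immediately follows the pinned
// plug we just queued, handling the case where the last object before it is too short to walk.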
void gc_heap::save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug)
{
    UNREFERENCED_PARAMETER(last_pinned_plug);

    mark& m = mark_stack_array[mark_stack_tos - 1];
    assert (last_pinned_plug == m.first);
    m.saved_post_plug_info_start = (uint8_t*)&(((plug_and_gap*)post_plug)[-1]);

#ifdef SHORT_PLUGS
    BOOL is_padded = is_plug_padded (last_object_in_last_plug);
    if (is_padded)
        clear_plug_padded (last_object_in_last_plug);
#endif //SHORT_PLUGS
    memcpy (&(m.saved_post_plug), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));
#ifdef SHORT_PLUGS
    if (is_padded)
        set_plug_padded (last_object_in_last_plug);
#endif //SHORT_PLUGS

    memcpy (&(m.saved_post_plug_reloc), m.saved_post_plug_info_start, sizeof (gap_reloc_pair));

    // This is important - we need to clear all bits here except the last one.
    m.saved_post_p = TRUE;

#ifdef _DEBUG
    m.saved_post_plug_debug.gap = 1;
#endif //_DEBUG

    dprintf (3, ("PP %Ix has NP %Ix right after", last_pinned_plug, post_plug));

    size_t last_obj_size = post_plug - last_object_in_last_plug;
    if (last_obj_size < min_pre_pin_obj_size)
    {
        dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug));
        record_interesting_data_point (idp_post_short);
#ifdef SHORT_PLUGS
        if (is_padded)
            record_interesting_data_point (idp_post_short_padded);
#endif //SHORT_PLUGS
        m.set_post_short();
#if defined (_DEBUG) && defined (VERIFY_HEAP)
        verify_pinned_queue_p = TRUE;
#endif // _DEBUG && VERIFY_HEAP

#ifdef COLLECTIBLE_CLASS
        if (is_collectible (last_object_in_last_plug))
        {
            m.set_post_short_collectible();
        }
#endif //COLLECTIBLE_CLASS

        if (contain_pointers (last_object_in_last_plug))
        {
            dprintf (3, ("short object: %Ix(%Ix)", last_object_in_last_plug, last_obj_size));

            // TODO: since we won't be able to walk this object in relocation, we still need to
            // take care of collectible assemblies here.
            go_through_object_nostart (method_table(last_object_in_last_plug), last_object_in_last_plug, last_obj_size, pval,
                {
                    size_t gap_offset = (((size_t)pval - (size_t)(post_plug - sizeof (gap_reloc_pair) - plug_skew))) / sizeof (uint8_t*);
                    dprintf (3, ("member: %Ix->%Ix, %Id ptrs from beginning of gap", (uint8_t*)pval, *pval, gap_offset));
                    m.set_post_short_bit (gap_offset);
                }
            );
        }
    }
}

//#define PREFETCH
#ifdef PREFETCH
__declspec(naked) void __fastcall Prefetch(void* addr)
{
   __asm {
       PREFETCHT0 [ECX]
        ret
    };
}
#else //PREFETCH
inline void Prefetch (void* addr)
{
    UNREFERENCED_PARAMETER(addr);
}
#endif //PREFETCH
#ifdef MH_SC_MARK
inline
VOLATILE(uint8_t*)& gc_heap::ref_mark_stack (gc_heap* hp, int index)
{
    return ((VOLATILE(uint8_t*)*)(hp->mark_stack_array))[index];
}

#endif //MH_SC_MARK

#define stolen 2
#define partial 1
#define partial_object 3
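// Mark stack entries used by mark_object_simple1 (and, under MH_SC_MARK, by mark_steal) encode
// their state in the low two bits of the pointer:
//   ..00  a plain object reference (straight_ref_p)
//   ..01  "partial": the continuation cursor inside a large object whose scan was interrupted
//         after num_partial_refs references (the bare value 1 means the cursor has not been
//         published yet, see ready_p)
//   ..10  "stolen": the entry has been claimed by another heap's mark_steal
//   ..11  "partial_object": the parent object of a partial entry (MH_SC_MARK only)
// ref_from_slot strips the tag bits; the helpers below test them.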
inline
uint8_t* ref_from_slot (uint8_t* r)
{
    return (uint8_t*)((size_t)r & ~(stolen | partial));
}
inline
BOOL stolen_p (uint8_t* r)
{
    return (((size_t)r & stolen) && !((size_t)r & partial));
}
inline
BOOL ready_p (uint8_t* r)
{
    return ((size_t)r != 1);
}
inline
BOOL partial_p (uint8_t* r)
{
    return (((size_t)r & partial) && !((size_t)r & stolen));
}
inline
BOOL straight_ref_p (uint8_t* r)
{
    return (!stolen_p (r) && !partial_p (r));
}
inline
BOOL partial_object_p (uint8_t* r)
{
    return (((size_t)r & partial_object) == partial_object);
}
inline
BOOL ref_p (uint8_t* r)
{
    return (straight_ref_p (r) || partial_object_p (r));
}

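// Non-recursive marking driver: traces the object graph from oo (already marked by the caller),
// using mark_stack_array as an explicit stack. Objects smaller than partial_size_th pointers'
// worth of bytes are scanned in one go; larger ones are scanned num_partial_refs references at a
// time, keeping a two-slot tuple on the stack (the parent object below, the tagged continuation
// cursor above) so the scan can be resumed later and, under MH_SC_MARK, stolen by another heap's
// mark_steal. 'start' is the position inside oo at which scanning begins.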
void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL)
{
    SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_tos = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)mark_stack_array;
    SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_limit = (SERVER_SC_MARK_VOLATILE(uint8_t*)*)&mark_stack_array[mark_stack_array_length];
    SERVER_SC_MARK_VOLATILE(uint8_t*)* mark_stack_base = mark_stack_tos;
#ifdef SORT_MARK_STACK
    SERVER_SC_MARK_VOLATILE(uint8_t*)* sorted_tos = mark_stack_base;
#endif //SORT_MARK_STACK

    // If we are doing a full GC we don't use the mark list anyway, so use m_boundary_fullgc,
    // which doesn't update the mark list.
    BOOL  full_p = (settings.condemned_generation == max_generation);

    assert ((start >= oo) && (start < oo+size(oo)));

#ifndef MH_SC_MARK
    *mark_stack_tos = oo;
#endif //!MH_SC_MARK

    while (1)
    {
#ifdef MULTIPLE_HEAPS
#else  //MULTIPLE_HEAPS
        const int thread = 0;
#endif //MULTIPLE_HEAPS

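        // oo == 0 is an empty or already-consumed slot; (size_t)oo == 4 is the placeholder that
        // mark_steal (MH_SC_MARK) writes into a slot when it steals a plain object from it.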
        if (oo && ((size_t)oo != 4))
        {
            size_t s = 0;
            if (stolen_p (oo))
            {
                --mark_stack_tos;
                goto next_level;
            }
            else if (!partial_p (oo) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
            {
                BOOL overflow_p = FALSE;

                if (mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit  - 1))
                {
                    size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
                    if (mark_stack_tos + CGCDesc::GetNumPointers(method_table(oo), s, num_components) >= (mark_stack_limit - 1))
                    {
                        overflow_p = TRUE;
                    }
                }

                if (overflow_p == FALSE)
                {
                    dprintf(3,("pushing mark for %Ix ", (size_t)oo));

                    go_through_object_cl (method_table(oo), oo, s, ppslot,
                                          {
                                              uint8_t* o = *ppslot;
                                              Prefetch(o);
                                              if (gc_mark (o, gc_low, gc_high))
                                              {
                                                  if (full_p)
                                                  {
                                                      m_boundary_fullgc (o);
                                                  }
                                                  else
                                                  {
                                                      m_boundary (o);
                                                  }
                                                  size_t obj_size = size (o);
                                                  promoted_bytes (thread) += obj_size;
                                                  if (contain_pointers_or_collectible (o))
                                                  {
                                                      *(mark_stack_tos++) = o;
                                                  }
                                              }
                                          }
                        );
                }
                else
                {
                    dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
                    min_overflow_address = min (min_overflow_address, oo);
                    max_overflow_address = max (max_overflow_address, oo);
                }
            }
            else
            {
                if (partial_p (oo))
                {
                    start = ref_from_slot (oo);
                    oo = ref_from_slot (*(--mark_stack_tos));
                    dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
                    assert ((oo < start) && (start < (oo + size (oo))));
                }
#ifdef COLLECTIBLE_CLASS
                else
                {
                    // If there's a class object, push it now. We are guaranteed to have the slot since
                    // we just popped one object off.
                    if (is_collectible (oo))
                    {
                        uint8_t* class_obj = get_class_object (oo);
                        if (gc_mark (class_obj, gc_low, gc_high))
                        {
                            if (full_p)
                            {
                                m_boundary_fullgc (class_obj);
                            }
                            else
                            {
                                m_boundary (class_obj);
                            }

                            size_t obj_size = size (class_obj);
                            promoted_bytes (thread) += obj_size;
                            *(mark_stack_tos++) = class_obj;
                            // The code below expects that the oo is still stored in the stack slot that was
                            // just popped and it "pushes" it back just by incrementing the mark_stack_tos.
                            // But the class_obj has just overwritten that stack slot and so the oo needs to
                            // be stored to the new slot that's pointed to by the mark_stack_tos.
                            *mark_stack_tos = oo;
                        }
                    }

                    if (!contain_pointers (oo))
                    {
                        goto next_level;
                    }
                }
#endif //COLLECTIBLE_CLASS

                s = size (oo);

                BOOL overflow_p = FALSE;

                if (mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
                {
                    overflow_p = TRUE;
                }
                if (overflow_p == FALSE)
                {
                    dprintf(3,("pushing mark for %Ix ", (size_t)oo));

                    //reserve two stack slots: place-1 for the parent object and place for the
                    //cursor at which scanning of it will resume
                    SERVER_SC_MARK_VOLATILE(uint8_t*)* place = ++mark_stack_tos;
                    mark_stack_tos++;
#ifdef MH_SC_MARK
                    *(place-1) = 0;
                    *(place) = (uint8_t*)partial;
#endif //MH_SC_MARK
                    int i = num_partial_refs;
                    uint8_t* ref_to_continue = 0;

                    go_through_object (method_table(oo), oo, s, ppslot,
                                       start, use_start, (oo + s),
                                       {
                                           uint8_t* o = *ppslot;
                                           Prefetch(o);
                                           if (gc_mark (o, gc_low, gc_high))
                                           {
                                                if (full_p)
                                                {
                                                    m_boundary_fullgc (o);
                                                }
                                                else
                                                {
                                                    m_boundary (o);
                                                }
                                                size_t obj_size = size (o);
                                                promoted_bytes (thread) += obj_size;
                                                if (contain_pointers_or_collectible (o))
                                                {
                                                    *(mark_stack_tos++) = o;
                                                    if (--i == 0)
                                                    {
                                                        ref_to_continue = (uint8_t*)((size_t)(ppslot+1) | partial);
                                                        goto more_to_do;
                                                    }

                                                }
                                           }

                                       }
                        );
                    //we are finished with this object
                    assert (ref_to_continue == 0);
#ifdef MH_SC_MARK
                    assert ((*(place-1)) == (uint8_t*)0);
#else //MH_SC_MARK
                    *(place-1) = 0;
#endif //MH_SC_MARK
                    *place = 0;
                    // tos is not decreased by 2 here; the two zeroed slots are harmless, they are
                    // simply skipped when popped (oo == 0 fails the check at the top of the loop).

more_to_do:
                    if (ref_to_continue)
                    {
                        //update the start
#ifdef MH_SC_MARK
                        assert ((*(place-1)) == (uint8_t*)0);
                        *(place-1) = (uint8_t*)((size_t)oo | partial_object);
                        assert (((*place) == (uint8_t*)1) || ((*place) == (uint8_t*)2));
#endif //MH_SC_MARK
                        *place = ref_to_continue;
                    }
                }
                else
                {
                    dprintf(3,("mark stack overflow for object %Ix ", (size_t)oo));
                    min_overflow_address = min (min_overflow_address, oo);
                    max_overflow_address = max (max_overflow_address, oo);
                }
            }
#ifdef SORT_MARK_STACK
            if (mark_stack_tos > sorted_tos + mark_stack_array_length/8)
            {
                rqsort1 (sorted_tos, mark_stack_tos-1);
                sorted_tos = mark_stack_tos-1;
            }
#endif //SORT_MARK_STACK
        }
    next_level:
        if (!(mark_stack_empty_p()))
        {
            oo = *(--mark_stack_tos);
            start = oo;

#ifdef SORT_MARK_STACK
            sorted_tos = min ((size_t)sorted_tos, (size_t)mark_stack_tos);
#endif //SORT_MARK_STACK
        }
        else
            break;
    }
}

#ifdef MH_SC_MARK
BOOL same_numa_node_p (int hn1, int hn2)
{
    return (heap_select::find_numa_node_from_heap_no (hn1) == heap_select::find_numa_node_from_heap_no (hn2));
}

int find_next_buddy_heap (int this_heap_number, int current_buddy, int n_heaps)
{
    int hn = (current_buddy+1)%n_heaps;
    while (hn != current_buddy)
    {
        if ((this_heap_number != hn) && (same_numa_node_p (this_heap_number, hn)))
            return hn;
        hn = (hn+1)%n_heaps;
    }
    return current_buddy;
}

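// Work stealing for the server mark phase (MH_SC_MARK). When this heap runs out of work it snoops
// the bottom max_snoop_level slots of a buddy heap's mark stack (buddies are chosen on the same
// NUMA node when possible) and tries to claim an entry with a compare-exchange: a plain object
// entry is replaced with the placeholder value 4, while for a partial scan the continuation slot
// is replaced with 'stolen' so the parent object can be taken. Anything successfully stolen is
// then marked locally via mark_object_simple1. The loop exits once every heap's mark stack is idle.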
void
gc_heap::mark_steal()
{
    mark_stack_busy() = 0;
    //clear the mark stack in the snooping range
    for (int i = 0; i < max_snoop_level; i++)
    {
        ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
    }

    //pick the next heap as our buddy
    int thpn = find_next_buddy_heap (heap_number, heap_number, n_heaps);

#ifdef SNOOP_STATS
    dprintf (SNOOP_LOG, ("(GC%d)heap%d: start snooping %d", settings.gc_index, heap_number, (heap_number+1)%n_heaps));
    uint32_t begin_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS

    int idle_loop_count = 0;
    int first_not_ready_level = 0;

    while (1)
    {
        gc_heap* hp = g_heaps [thpn];
        int level = first_not_ready_level;
        first_not_ready_level = 0;

        while (check_next_mark_stack (hp) && (level < (max_snoop_level-1)))
        {
            idle_loop_count = 0;
#ifdef SNOOP_STATS
            snoop_stat.busy_count++;
            dprintf (SNOOP_LOG, ("heap%d: looking at next heap level %d stack contents: %Ix",
                                 heap_number, level, (int)((uint8_t**)(hp->mark_stack_array))[level]));
#endif //SNOOP_STATS

            uint8_t* o = ref_mark_stack (hp, level);

            uint8_t* start = o;
            if (ref_p (o))
            {
                mark_stack_busy() = 1;

                BOOL success = TRUE;
                uint8_t* next = (ref_mark_stack (hp, level+1));
                if (ref_p (next))
                {
                    if (((size_t)o > 4) && !partial_object_p (o))
                    {
                        //this is a normal object, not a partial mark tuple
                        //success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), 0, o)==o);
                        success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level), (uint8_t*)4, o)==o);
#ifdef SNOOP_STATS
                        snoop_stat.interlocked_count++;
                        if (success)
                            snoop_stat.normal_count++;
#endif //SNOOP_STATS
                    }
                    else
                    {
                        //it is a stolen entry, or beginning/ending of a partial mark
                        level++;
#ifdef SNOOP_STATS
                        snoop_stat.stolen_or_pm_count++;
#endif //SNOOP_STATS
                        success = FALSE;
                    }
                }
                else if (stolen_p (next))
                {
                    //ignore the stolen guy and go to the next level
                    success = FALSE;
                    level+=2;
#ifdef SNOOP_STATS
                    snoop_stat.stolen_entry_count++;
#endif //SNOOP_STATS
                }
                else
                {
                    assert (partial_p (next));
                    start = ref_from_slot (next);
                    //re-read the object
                    o = ref_from_slot (ref_mark_stack (hp, level));
                    if (o && start)
                    {
                        //steal the object
                        success = (Interlocked::CompareExchangePointer (&ref_mark_stack (hp, level+1), (uint8_t*)stolen, next)==next);
#ifdef SNOOP_STATS
                        snoop_stat.interlocked_count++;
                        if (success)
                        {
                            snoop_stat.partial_mark_parent_count++;
                        }
#endif //SNOOP_STATS
                    }
                    else
                    {
                        // stack is not ready, or o is completely different from the last time we read from this stack level.
                        // go up 2 levels to steal children or totally unrelated objects.
                        success = FALSE;
                        if (first_not_ready_level == 0)
                        {
                            first_not_ready_level = level;
                        }
                        level+=2;
#ifdef SNOOP_STATS
                        snoop_stat.pm_not_ready_count++;
#endif //SNOOP_STATS
                    }
                }
                if (success)
                {

#ifdef SNOOP_STATS
                    dprintf (SNOOP_LOG, ("heap%d: marking %Ix from %d [%d] tl:%dms",
                            heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
                            (GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
                    uint32_t start_tick = GCToOSInterface::GetLowPrecisionTimeStamp();
#endif //SNOOP_STATS

                    mark_object_simple1 (o, start, heap_number);

#ifdef SNOOP_STATS
                    dprintf (SNOOP_LOG, ("heap%d: done marking %Ix from %d [%d] %dms tl:%dms",
                            heap_number, (size_t)o, (heap_number+1)%n_heaps, level,
                            (GCToOSInterface::GetLowPrecisionTimeStamp()-start_tick),(GCToOSInterface::GetLowPrecisionTimeStamp()-begin_tick)));
#endif //SNOOP_STATS

                    mark_stack_busy() = 0;

                    //clear the mark stack in snooping range
                    for (int i = 0; i < max_snoop_level; i++)
                    {
                        if (((uint8_t**)mark_stack_array)[i] != 0)
                        {
                            ((VOLATILE(uint8_t*)*)(mark_stack_array))[i] = 0;
#ifdef SNOOP_STATS
                            snoop_stat.stack_bottom_clear_count++;
#endif //SNOOP_STATS
                        }
                    }

                    level = 0;
                }
                mark_stack_busy() = 0;
            }
            else
            {
                //slot is either partial or stolen
                level++;
            }
        }
        if ((first_not_ready_level != 0) && hp->mark_stack_busy())
        {
            continue;
        }
        if (!hp->mark_stack_busy())
        {
            first_not_ready_level = 0;
            idle_loop_count++;

            if ((idle_loop_count % (6) )==1)
            {
#ifdef SNOOP_STATS
                snoop_stat.switch_to_thread_count++;
#endif //SNOOP_STATS
                GCToOSInterface::Sleep(1);
            }
            int free_count = 1;
#ifdef SNOOP_STATS
            snoop_stat.stack_idle_count++;
            //dprintf (SNOOP_LOG, ("heap%d: counting idle threads", heap_number));
#endif //SNOOP_STATS
            for (int hpn = (heap_number+1)%n_heaps; hpn != heap_number;)
            {
                if (!((g_heaps [hpn])->mark_stack_busy()))
                {
                    free_count++;
#ifdef SNOOP_STATS
                dprintf (SNOOP_LOG, ("heap%d: %d idle", heap_number, free_count));
#endif //SNOOP_STATS
                }
                else if (same_numa_node_p (hpn, heap_number) || ((idle_loop_count%1000))==999)
                {
                    thpn = hpn;
                    break;
                }
                hpn = (hpn+1)%n_heaps;
                YieldProcessor();
            }
            if (free_count == n_heaps)
            {
                break;
            }
        }
    }
}

inline
BOOL gc_heap::check_next_mark_stack (gc_heap* next_heap)
{
#ifdef SNOOP_STATS
    snoop_stat.check_level_count++;
#endif //SNOOP_STATS
    return (next_heap->mark_stack_busy()>=1);
}
#endif //MH_SC_MARK

#ifdef SNOOP_STATS
void gc_heap::print_snoop_stat()
{
    dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s",
        "heap", "check", "zero", "mark", "stole", "pstack", "nstack", "nonsk"));
    dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d",
        snoop_stat.heap_index,
        snoop_stat.objects_checked_count,
        snoop_stat.zero_ref_count,
        snoop_stat.objects_marked_count,
        snoop_stat.stolen_stack_count,
        snoop_stat.partial_stack_count,
        snoop_stat.normal_stack_count,
        snoop_stat.non_stack_count));
    dprintf (1234, ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s",
        "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "clear"));
    dprintf (1234, ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
        snoop_stat.heap_index,
        snoop_stat.check_level_count,
        snoop_stat.busy_count,
        snoop_stat.interlocked_count,
        snoop_stat.partial_mark_parent_count,
        snoop_stat.stolen_or_pm_count,
        snoop_stat.stolen_entry_count,
        snoop_stat.pm_not_ready_count,
        snoop_stat.normal_count,
        snoop_stat.stack_bottom_clear_count));

    printf ("\n%4s | %8s | %8s | %8s | %8s | %8s\n",
        "heap", "check", "zero", "mark", "idle", "switch");
    printf ("%4d | %8d | %8d | %8d | %8d | %8d\n",
        snoop_stat.heap_index,
        snoop_stat.objects_checked_count,
        snoop_stat.zero_ref_count,
        snoop_stat.objects_marked_count,
        snoop_stat.stack_idle_count,
        snoop_stat.switch_to_thread_count);
    printf ("%4s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n",
        "heap", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
    printf ("%4d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
        snoop_stat.heap_index,
        snoop_stat.check_level_count,
        snoop_stat.busy_count,
        snoop_stat.interlocked_count,
        snoop_stat.partial_mark_parent_count,
        snoop_stat.stolen_or_pm_count,
        snoop_stat.stolen_entry_count,
        snoop_stat.pm_not_ready_count,
        snoop_stat.normal_count,
        snoop_stat.stack_bottom_clear_count);
}
#endif //SNOOP_STATS

#ifdef HEAP_ANALYZE
void
gc_heap::ha_mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
{
    if (!internal_root_array)
    {
        internal_root_array = new (nothrow) uint8_t* [internal_root_array_length];
        if (!internal_root_array)
        {
            heap_analyze_success = FALSE;
        }
    }

    if (heap_analyze_success && (internal_root_array_length <= internal_root_array_index))
    {
        size_t new_size = 2*internal_root_array_length;

        uint64_t available_physical = 0;
        get_memory_info (NULL, &available_physical);
        if (new_size > (size_t)(available_physical / 10))
        {
            heap_analyze_success = FALSE;
        }
        else
        {
            uint8_t** tmp = new (nothrow) uint8_t* [new_size];
            if (tmp)
            {
                memcpy (tmp, internal_root_array,
                        internal_root_array_length*sizeof (uint8_t*));
                delete[] internal_root_array;
                internal_root_array = tmp;
                internal_root_array_length = new_size;
            }
            else
            {
                heap_analyze_success = FALSE;
            }
        }
    }

    if (heap_analyze_success)
    {
        PREFIX_ASSUME(internal_root_array_index < internal_root_array_length);

        uint8_t* ref = (uint8_t*)po;
        if (!current_obj ||
            !((ref >= current_obj) && (ref < (current_obj + current_obj_size))))
        {
            gc_heap* hp = gc_heap::heap_of (ref);
            current_obj = hp->find_object (ref, hp->lowest_address);
            current_obj_size = size (current_obj);

            internal_root_array[internal_root_array_index] = current_obj;
            internal_root_array_index++;
        }
    }

    mark_object_simple (po THREAD_NUMBER_ARG);
}
#endif //HEAP_ANALYZE

//this method assumes that *po is in the [low, high[ range
void
gc_heap::mark_object_simple (uint8_t** po THREAD_NUMBER_DCL)
{
    uint8_t* o = *po;
#ifdef MULTIPLE_HEAPS
#else  //MULTIPLE_HEAPS
    const int thread = 0;
#endif //MULTIPLE_HEAPS
    {
#ifdef SNOOP_STATS
        snoop_stat.objects_checked_count++;
#endif //SNOOP_STATS

        if (gc_mark1 (o))
        {
            m_boundary (o);
            size_t s = size (o);
            promoted_bytes (thread) += s;
            {
                go_through_object_cl (method_table(o), o, s, poo,
                                        {
                                            uint8_t* oo = *poo;
                                            if (gc_mark (oo, gc_low, gc_high))
                                            {
                                                m_boundary (oo);
                                                size_t obj_size = size (oo);
                                                promoted_bytes (thread) += obj_size;

                                                if (contain_pointers_or_collectible (oo))
                                                    mark_object_simple1 (oo, oo THREAD_NUMBER_ARG);
                                            }
                                        }
                    );
            }
        }
    }
}

inline
uint8_t* gc_heap::mark_object (uint8_t* o THREAD_NUMBER_DCL)
{
    if ((o >= gc_low) && (o < gc_high))
        mark_object_simple (&o THREAD_NUMBER_ARG);
#ifdef MULTIPLE_HEAPS
    else if (o)
    {
        //find the heap
        gc_heap* hp = heap_of (o);
        assert (hp);
        if ((o >= hp->gc_low) && (o < hp->gc_high))
            mark_object_simple (&o THREAD_NUMBER_ARG);
    }
#endif //MULTIPLE_HEAPS

    return o;
}

#ifdef BACKGROUND_GC

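// Concurrent (background) marking uses the same partial-scan idea as mark_object_simple1 but with
// a simpler encoding on background_mark_stack_array: when a large object's scan is interrupted,
// two entries are pushed, the resume position followed by the object pointer with its low bit set.
// There is no stealing here; instead the loop periodically calls allow_fgc() so a foreground GC
// can run, and scan_background_roots knows how to relocate these [cursor, tagged parent] pairs if
// such a GC moves the objects.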
void gc_heap::background_mark_simple1 (uint8_t* oo THREAD_NUMBER_DCL)
{
    uint8_t** mark_stack_limit = &background_mark_stack_array[background_mark_stack_array_length];

#ifdef SORT_MARK_STACK
    uint8_t** sorted_tos = background_mark_stack_array;
#endif //SORT_MARK_STACK

    background_mark_stack_tos = background_mark_stack_array;

    while (1)
    {
#ifdef MULTIPLE_HEAPS
#else  //MULTIPLE_HEAPS
        const int thread = 0;
#endif //MULTIPLE_HEAPS
        if (oo)
        {
            size_t s = 0;
            if ((((size_t)oo & 1) == 0) && ((s = size (oo)) < (partial_size_th*sizeof (uint8_t*))))
            {
                BOOL overflow_p = FALSE;

                if (background_mark_stack_tos + (s) /sizeof (uint8_t*) >= (mark_stack_limit - 1))
                {
                    size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
                    size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);
                    if (background_mark_stack_tos + num_pointers >= (mark_stack_limit - 1))
                    {
                        dprintf (2, ("h%d: %Id left, obj (mt: %Ix) %Id ptrs",
                            heap_number,
                            (size_t)(mark_stack_limit - 1 - background_mark_stack_tos),
                            method_table(oo),
                            num_pointers));

                        bgc_overflow_count++;
                        overflow_p = TRUE;
                    }
                }

                if (overflow_p == FALSE)
                {
                    dprintf(3,("pushing mark for %Ix ", (size_t)oo));

                    go_through_object_cl (method_table(oo), oo, s, ppslot,
                    {
                        uint8_t* o = *ppslot;
                        Prefetch(o);
                        if (background_mark (o,
                                             background_saved_lowest_address,
                                             background_saved_highest_address))
                        {
                            //m_boundary (o);
                            size_t obj_size = size (o);
                            bpromoted_bytes (thread) += obj_size;
                            if (contain_pointers_or_collectible (o))
                            {
                                *(background_mark_stack_tos++) = o;

                            }
                        }
                    }
                        );
                }
                else
                {
                    dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
                    background_min_overflow_address = min (background_min_overflow_address, oo);
                    background_max_overflow_address = max (background_max_overflow_address, oo);
                }
            }
            else
            {
                uint8_t* start = oo;
                if ((size_t)oo & 1)
                {
                    oo = (uint8_t*)((size_t)oo & ~1);
                    start = *(--background_mark_stack_tos);
                    dprintf (4, ("oo: %Ix, start: %Ix\n", (size_t)oo, (size_t)start));
                }
#ifdef COLLECTIBLE_CLASS
                else
                {
                    // If there's a class object, push it now. We are guaranteed to have the slot since
                    // we just popped one object off.
                    if (is_collectible (oo))
                    {
                        uint8_t* class_obj = get_class_object (oo);
                        if (background_mark (class_obj,
                                            background_saved_lowest_address,
                                            background_saved_highest_address))
                        {
                            size_t obj_size = size (class_obj);
                            bpromoted_bytes (thread) += obj_size;

                            *(background_mark_stack_tos++) = class_obj;
                        }
                    }

                    if (!contain_pointers (oo))
                    {
                        goto next_level;
                    }
                }
#endif //COLLECTIBLE_CLASS

                s = size (oo);

                BOOL overflow_p = FALSE;

                if (background_mark_stack_tos + (num_partial_refs + 2)  >= mark_stack_limit)
                {
                    size_t num_components = ((method_table(oo))->HasComponentSize() ? ((CObjectHeader*)oo)->GetNumComponents() : 0);
                    size_t num_pointers = CGCDesc::GetNumPointers(method_table(oo), s, num_components);

                    dprintf (2, ("h%d: PM: %Id left, obj %Ix (mt: %Ix) start: %Ix, total: %Id",
                        heap_number,
                        (size_t)(mark_stack_limit - background_mark_stack_tos),
                        oo,
                        method_table(oo),
                        start,
                        num_pointers));

                    bgc_overflow_count++;
                    overflow_p = TRUE;
                }
                if (overflow_p == FALSE)
                {
                    dprintf(3,("pushing mark for %Ix ", (size_t)oo));

                    //push the current scan position (start) and the object tagged as a partial mark
                    uint8_t** place = background_mark_stack_tos++;
                    *(place) = start;
                    *(background_mark_stack_tos++) = (uint8_t*)((size_t)oo | 1);

                    int num_pushed_refs = num_partial_refs;
                    int num_processed_refs = num_pushed_refs * 16;

                    go_through_object (method_table(oo), oo, s, ppslot,
                                       start, use_start, (oo + s),
                    {
                        uint8_t* o = *ppslot;
                        Prefetch(o);

                        if (background_mark (o,
                                            background_saved_lowest_address,
                                            background_saved_highest_address))
                        {
                            //m_boundary (o);
                            size_t obj_size = size (o);
                            bpromoted_bytes (thread) += obj_size;
                            if (contain_pointers_or_collectible (o))
                            {
                                *(background_mark_stack_tos++) = o;
                                if (--num_pushed_refs == 0)
                                {
                                    //update the start
                                    *place = (uint8_t*)(ppslot+1);
                                    goto more_to_do;
                                }

                            }
                        }
                        if (--num_processed_refs == 0)
                        {
                            // give foreground GC a chance to run
                            *place = (uint8_t*)(ppslot + 1);
                            goto more_to_do;
                        }

                        }
                        );
                    //we are finished with this object
                    *place = 0;
                    *(place+1) = 0;
                
                more_to_do:;
                }
                else
                {
                    dprintf (3,("mark stack overflow for object %Ix ", (size_t)oo));
                    background_min_overflow_address = min (background_min_overflow_address, oo);
                    background_max_overflow_address = max (background_max_overflow_address, oo);
                }
            }
        }
#ifdef SORT_MARK_STACK
        if (background_mark_stack_tos > sorted_tos + mark_stack_array_length/8)
        {
            rqsort1 (sorted_tos, background_mark_stack_tos-1);
            sorted_tos = background_mark_stack_tos-1;
        }
#endif //SORT_MARK_STACK

#ifdef COLLECTIBLE_CLASS
next_level:
#endif // COLLECTIBLE_CLASS
        allow_fgc();

        if (!(background_mark_stack_tos == background_mark_stack_array))
        {
            oo = *(--background_mark_stack_tos);

#ifdef SORT_MARK_STACK
            sorted_tos = (uint8_t**)min ((size_t)sorted_tos, (size_t)background_mark_stack_tos);
#endif //SORT_MARK_STACK
        }
        else
            break;
    }

    assert (background_mark_stack_tos == background_mark_stack_array);


}

//this version is different from the foreground GC because
//it can't keep pointers to the inside of an object
//while calling background_mark_simple1. The object could be moved
//by an intervening foreground gc.
//this method assumes that o is in the [low, high[ range
void
gc_heap::background_mark_simple (uint8_t* o THREAD_NUMBER_DCL)
{
#ifdef MULTIPLE_HEAPS
#else  //MULTIPLE_HEAPS
    const int thread = 0;
#endif //MULTIPLE_HEAPS
    {
        dprintf (3, ("bmarking %Ix", o));

        if (background_mark1 (o))
        {
            //m_boundary (o);
            size_t s = size (o);
            bpromoted_bytes (thread) += s;

            if (contain_pointers_or_collectible (o))
            {
                background_mark_simple1 (o THREAD_NUMBER_ARG);
            }
        }
        allow_fgc();
    }
}

inline
uint8_t* gc_heap::background_mark_object (uint8_t* o THREAD_NUMBER_DCL)
{
    if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
    {
        background_mark_simple (o THREAD_NUMBER_ARG);
    }
    else
    {
        if (o)
        {
            dprintf (3, ("or-%Ix", o));
        }
    }
    return o;
}

void gc_heap::background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags)
{
    UNREFERENCED_PARAMETER(sc);

    assert (settings.concurrent);
    uint8_t* o = (uint8_t*)object;

    gc_heap* hp = gc_heap::heap_of (o);
#ifdef INTERIOR_POINTERS
    if (flags & GC_CALL_INTERIOR)
    {
        o = hp->find_object (o, background_saved_lowest_address);
    }
#endif //INTERIOR_POINTERS

    if (!background_object_marked (o, FALSE))
    {
        FATAL_GC_ERROR();
    }
}

void gc_heap::background_promote (Object** ppObject, ScanContext* sc, uint32_t flags)
{
    UNREFERENCED_PARAMETER(sc);
    //in order to save space on the array, mark the object,
    //knowing that it will be visited later
    assert (settings.concurrent);

    THREAD_NUMBER_FROM_CONTEXT;
#ifndef MULTIPLE_HEAPS
    const int thread = 0;
#endif //!MULTIPLE_HEAPS

    uint8_t* o = (uint8_t*)*ppObject;

    if (o == 0)
        return;

#ifdef DEBUG_DestroyedHandleValue
    // we can race with destroy handle during concurrent scan
    if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
        return;
#endif //DEBUG_DestroyedHandleValue

    HEAP_FROM_THREAD;

    gc_heap* hp = gc_heap::heap_of (o);

    if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
    {
        return;
    }

#ifdef INTERIOR_POINTERS
    if (flags & GC_CALL_INTERIOR)
    {
        o = hp->find_object (o, hp->background_saved_lowest_address);
        if (o == 0)
            return;
    }
#endif //INTERIOR_POINTERS

#ifdef FEATURE_CONSERVATIVE_GC
    // For conservative GC, a value on stack may point to middle of a free object.
    // In this case, we don't need to promote the pointer.
    if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
    {
        return;
    }
#endif //FEATURE_CONSERVATIVE_GC

#ifdef _DEBUG
    ((CObjectHeader*)o)->Validate();
#endif //_DEBUG

    //needs to be called before the marking because it is possible for a foreground
    //gc to take place during the mark and move the object
    STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);

    hpt->background_mark_simple (o THREAD_NUMBER_ARG);
}

//used by the ephemeral collection to scan the local background structures
//containing references.
void
gc_heap::scan_background_roots (promote_func* fn, int hn, ScanContext *pSC)
{
    ScanContext sc;
    if (pSC == 0)
        pSC = &sc;

    pSC->thread_number = hn;

    BOOL relocate_p = (fn == &GCHeap::Relocate);

    dprintf (3, ("Scanning background mark list"));

    //scan mark_list
    size_t mark_list_finger = 0;
    while (mark_list_finger < c_mark_list_index)
    {
        uint8_t** o = &c_mark_list [mark_list_finger];
        if (!relocate_p)
        {
            // We may not be able to calculate the size during relocate as POPO
            // may have written over the object.
            size_t s = size (*o);
            assert (Align (s) >= Align (min_obj_size));
            dprintf(3,("background root %Ix", (size_t)*o));
        }
        (*fn) ((Object**)o, pSC, 0);
        mark_list_finger++;
    }

    //scan the mark stack
    dprintf (3, ("Scanning background mark stack"));

    uint8_t** finger = background_mark_stack_array;
    while (finger < background_mark_stack_tos)
    {
        if ((finger + 1) < background_mark_stack_tos)
        {
            // We need to check for the partial mark case here.
            uint8_t* parent_obj = *(finger + 1);
            if ((size_t)parent_obj & 1)
            {
                uint8_t* place = *finger;
                size_t place_offset = 0;
                uint8_t* real_parent_obj = (uint8_t*)((size_t)parent_obj & ~1);

                if (relocate_p)
                {
                    *(finger + 1) = real_parent_obj;
                    place_offset = place - real_parent_obj;
                    dprintf(3,("relocating background root %Ix", (size_t)real_parent_obj));
                    (*fn) ((Object**)(finger + 1), pSC, 0);
                    real_parent_obj = *(finger + 1);
                    *finger = real_parent_obj + place_offset;
                    *(finger + 1) = (uint8_t*)((size_t)real_parent_obj | 1);
                    dprintf(3,("roots changed to %Ix, %Ix", *finger, *(finger + 1)));
                }
                else
                {
                    uint8_t** temp = &real_parent_obj;
                    dprintf(3,("marking background root %Ix", (size_t)real_parent_obj));
                    (*fn) ((Object**)temp, pSC, 0);
                }

                finger += 2;
                continue;
            }
        }
        dprintf(3,("background root %Ix", (size_t)*finger));
        (*fn) ((Object**)finger, pSC, 0);
        finger++;
    }
}

inline
void gc_heap::background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL)
{
    if (contain_pointers (oo))
    {
        size_t total_refs = 0;
        size_t s = size (oo);
        go_through_object_nostart (method_table(oo), oo, s, po,
                          {
                            uint8_t* o = *po;
                            total_refs++;
                            background_mark_object (o THREAD_NUMBER_ARG);
                          }
            );

        dprintf (3,("Background marking through %Ix went through %Id refs",
                          (size_t)oo,
                           total_refs));
    }
}

uint8_t* gc_heap::background_seg_end (heap_segment* seg, BOOL concurrent_p)
{
    if (concurrent_p && (seg == saved_overflow_ephemeral_seg))
    {
        // for now we stop at where gen1 started when we started processing
        return background_min_soh_overflow_address;
    }
    else
    {
        return heap_segment_allocated (seg);
    }
}

uint8_t* gc_heap::background_first_overflow (uint8_t* min_add,
                                          heap_segment* seg,
                                          BOOL concurrent_p,
                                          BOOL small_object_p)
{
    uint8_t* o = 0;

    if (small_object_p)
    {
        if (in_range_for_segment (min_add, seg))
        {
            // min_add was the beginning of gen1 when we did the concurrent
            // overflow. Now we could be in a situation where min_add is
            // actually the same as allocated for that segment (because
            // we expanded the heap), in which case we cannot call
            // find_first_object on this address or we will AV.
            if (min_add >= heap_segment_allocated (seg))
            {
                return min_add;
            }
            else
            {
                if (concurrent_p &&
                    ((seg == saved_overflow_ephemeral_seg) && (min_add >= background_min_soh_overflow_address)))
                {
                    return background_min_soh_overflow_address;
                }
                else
                {
                    o = find_first_object (min_add, heap_segment_mem (seg));
                    return o;
                }
            }
        }
    }

    o = max (heap_segment_mem (seg), min_add);
    return o;
}

void gc_heap::background_process_mark_overflow_internal (int condemned_gen_number,
                                                         uint8_t* min_add, uint8_t* max_add,
                                                         BOOL concurrent_p)
{
    if (concurrent_p)
    {
        current_bgc_state = bgc_overflow_soh;
    }

    size_t total_marked_objects = 0;

#ifdef MULTIPLE_HEAPS
    int thread = heap_number;
#endif //MULTIPLE_HEAPS

    exclusive_sync* loh_alloc_lock = 0;

    dprintf (2,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
#ifdef MULTIPLE_HEAPS
    // We don't have each heap scan all heaps concurrently because we are worried about
    // multiple threads calling things like find_first_object.
    int h_start = (concurrent_p ? heap_number : 0);
    int h_end = (concurrent_p ? (heap_number + 1) : n_heaps);
    for (int hi = h_start; hi < h_end; hi++)
    {
        gc_heap*  hp = (concurrent_p ? this : g_heaps [(heap_number + hi) % n_heaps]);

#else
    {
        gc_heap*  hp = 0;

#endif //MULTIPLE_HEAPS
        BOOL small_object_segments = TRUE;
        int align_const = get_alignment_constant (small_object_segments);
        generation* gen = hp->generation_of (condemned_gen_number);
        heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
        PREFIX_ASSUME(seg != NULL);
        loh_alloc_lock = hp->bgc_alloc_lock;

        uint8_t* o = hp->background_first_overflow (min_add,
                                                    seg,
                                                    concurrent_p,
                                                    small_object_segments);

        while (1)
        {
            while ((o < hp->background_seg_end (seg, concurrent_p)) && (o <= max_add))
            {
                dprintf (3, ("considering %Ix", (size_t)o));

                size_t s;

                if (concurrent_p && !small_object_segments)
                {
                    loh_alloc_lock->bgc_mark_set (o);

                    if (((CObjectHeader*)o)->IsFree())
                    {
                        s = unused_array_size (o);
                    }
                    else
                    {
                        s = size (o);
                    }
                }
                else
                {
                    s = size (o);
                }

                if (background_object_marked (o, FALSE) && contain_pointers_or_collectible (o))
                {
                    total_marked_objects++;
                    go_through_object_cl (method_table(o), o, s, poo,
                                          uint8_t* oo = *poo;
                                          background_mark_object (oo THREAD_NUMBER_ARG);
                                         );
                }

                if (concurrent_p && !small_object_segments)
                {
                    loh_alloc_lock->bgc_mark_done ();
                }

                o = o + Align (s, align_const);

                if (concurrent_p)
                {
                    allow_fgc();
                }
            }

            dprintf (2, ("went through overflow objects in segment %Ix (%d) (so far %Id marked)",
                heap_segment_mem (seg), (small_object_segments ? 0 : 1), total_marked_objects));

            if ((concurrent_p && (seg == hp->saved_overflow_ephemeral_seg)) ||
                (seg = heap_segment_next_in_range (seg)) == 0)
            {
                if (small_object_segments)
                {
                    if (concurrent_p)
                    {
                        current_bgc_state = bgc_overflow_loh;
                    }

                    dprintf (2, ("h%d: SOH: ov-mo: %Id", heap_number, total_marked_objects));
                    fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
                    concurrent_print_time_delta (concurrent_p ? "Cov SOH" : "Nov SOH");
                    total_marked_objects = 0;
                    small_object_segments = FALSE;
                    align_const = get_alignment_constant (small_object_segments);
                    seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));

                    PREFIX_ASSUME(seg != NULL);

                    o = max (heap_segment_mem (seg), min_add);
                    continue;
                }
                else
                {
                    dprintf (GTC_LOG, ("h%d: LOH: ov-mo: %Id", heap_number, total_marked_objects));
                    fire_overflow_event (min_add, max_add, total_marked_objects, !small_object_segments);
                    break;
                }
            }
            else
            {
                o = hp->background_first_overflow (min_add,
                                                   seg,
                                                   concurrent_p,
                                                   small_object_segments);
                continue;
            }
        }
    }
}

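// Drives background mark overflow processing. During the concurrent phase each heap processes only
// its own segments and stops at the gen1 start captured in background_min_soh_overflow_address;
// that skipped ephemeral range is folded back into the overflow range for the final non-concurrent
// pass. The background mark stack is grown opportunistically between passes (with a cap tied to
// the total heap size), and the non-concurrent pass loops via 'recheck' because processing
// overflow can overflow again. Returns TRUE if any overflow range was processed.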
BOOL gc_heap::background_process_mark_overflow (BOOL concurrent_p)
{
    BOOL grow_mark_array_p = TRUE;

    if (concurrent_p)
    {
        assert (!processed_soh_overflow_p);

        if ((background_max_overflow_address != 0) &&
            (background_min_overflow_address != MAX_PTR))
        {
            // We have overflow to process but we know we can't process the ephemeral generations
            // now (we actually could process till the current gen1 start but since we are going to
            // make overflow per segment, for now I'll just stop at the saved gen1 start).
            saved_overflow_ephemeral_seg = ephemeral_heap_segment;
            background_max_soh_overflow_address = heap_segment_reserved (saved_overflow_ephemeral_seg);
            background_min_soh_overflow_address = generation_allocation_start (generation_of (max_generation-1));
        }
    }
    else
    {
        assert ((saved_overflow_ephemeral_seg == 0) ||
                ((background_max_soh_overflow_address != 0) &&
                 (background_min_soh_overflow_address != MAX_PTR)));

        if (!processed_soh_overflow_p)
        {
            // if there was no more overflow we just need to process what we didn't process
            // on the saved ephemeral segment.
            if ((background_max_overflow_address == 0) && (background_min_overflow_address == MAX_PTR))
            {
                dprintf (2, ("final processing mark overflow - no more overflow since last time"));
                grow_mark_array_p = FALSE;
            }

            background_min_overflow_address = min (background_min_overflow_address,
                                                background_min_soh_overflow_address);
            background_max_overflow_address = max (background_max_overflow_address,
                                                background_max_soh_overflow_address);
            processed_soh_overflow_p = TRUE;
        }
    }

    BOOL  overflow_p = FALSE;
recheck:
    if ((background_max_overflow_address != 0) ||
        (background_min_overflow_address != MAX_PTR))
    {
        overflow_p = TRUE;

        if (grow_mark_array_p)
        {
            // Try to grow the array.
            size_t new_size = max (MARK_STACK_INITIAL_LENGTH, 2*background_mark_stack_array_length);

            if ((new_size * sizeof(mark)) > 100*1024)
            {
                size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);

                new_size = min(new_max_size, new_size);
            }

            if ((background_mark_stack_array_length < new_size) &&
                ((new_size - background_mark_stack_array_length) > (background_mark_stack_array_length / 2)))
            {
                dprintf (2, ("h%d: ov grow to %Id", heap_number, new_size));

                uint8_t** tmp = new (nothrow) uint8_t* [new_size];
                if (tmp)
                {
                    delete [] background_mark_stack_array;
                    background_mark_stack_array = tmp;
                    background_mark_stack_array_length = new_size;
                    background_mark_stack_tos = background_mark_stack_array;
                }
            }
        }
        else
        {
            grow_mark_array_p = TRUE;
        }

        uint8_t*  min_add = background_min_overflow_address;
        uint8_t*  max_add = background_max_overflow_address;

        background_max_overflow_address = 0;
        background_min_overflow_address = MAX_PTR;

        background_process_mark_overflow_internal (max_generation, min_add, max_add, concurrent_p);
        if (!concurrent_p)
        {
            goto recheck;
        }
    }

    return overflow_p;
}

#endif //BACKGROUND_GC

inline
void gc_heap::mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL)
{
#ifndef COLLECTIBLE_CLASS
    UNREFERENCED_PARAMETER(mark_class_object_p);
    BOOL to_mark_class_object = FALSE;
#else //COLLECTIBLE_CLASS
    BOOL to_mark_class_object = (mark_class_object_p && (is_collectible(oo)));
#endif //COLLECTIBLE_CLASS
    if (contain_pointers (oo) || to_mark_class_object)
    {
        dprintf(3,( "Marking through %Ix", (size_t)oo));
        size_t s = size (oo);

#ifdef COLLECTIBLE_CLASS
        if (to_mark_class_object)
        {
            uint8_t* class_obj = get_class_object (oo);
            mark_object (class_obj THREAD_NUMBER_ARG);
        }
#endif //COLLECTIBLE_CLASS

        if (contain_pointers (oo))
        {
            go_through_object_nostart (method_table(oo), oo, s, po,
                                uint8_t* o = *po;
                                mark_object (o THREAD_NUMBER_ARG);
                                );
        }
    }
}

size_t gc_heap::get_total_heap_size()
{
    size_t total_heap_size = 0;

#ifdef MULTIPLE_HEAPS
    int hn = 0;

    for (hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp2 = gc_heap::g_heaps [hn];
        total_heap_size += hp2->generation_size (max_generation + 1) + hp2->generation_sizes (hp2->generation_of (max_generation));
    }
#else
    total_heap_size = generation_size (max_generation + 1) + generation_sizes (generation_of (max_generation));
#endif //MULTIPLE_HEAPS

    return total_heap_size;
}

size_t gc_heap::get_total_fragmentation()
{
    size_t total_fragmentation = 0;

#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps[hn];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        for (int i = 0; i <= (max_generation + 1); i++)
        {
            generation* gen = hp->generation_of (i);
            total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
        }
    }

    return total_fragmentation;
}

size_t gc_heap::get_total_gen_fragmentation (int gen_number)
{
    size_t total_fragmentation = 0;

#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps[hn];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        generation* gen = hp->generation_of (gen_number);
        total_fragmentation += (generation_free_list_space (gen) + generation_free_obj_space (gen));
    }

    return total_fragmentation;
}

size_t gc_heap::get_total_gen_estimated_reclaim (int gen_number)
{
    size_t total_estimated_reclaim = 0;

#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps[hn];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        total_estimated_reclaim += hp->estimated_reclaim (gen_number);
    }

    return total_estimated_reclaim;
}

size_t gc_heap::committed_size()
{
    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
    size_t total_committed = 0;

    while (1)
    {
        total_committed += heap_segment_committed (seg) - (uint8_t*)seg;

        seg = heap_segment_next (seg);
        if (!seg)
        {
            if (gen != large_object_generation)
            {
                gen = generation_of (max_generation + 1);
                seg = generation_start_segment (gen);
            }
            else
                break;
        }
    }

    return total_committed;
}

size_t gc_heap::get_total_committed_size()
{
    size_t total_committed = 0;

#ifdef MULTIPLE_HEAPS
    int hn = 0;

    for (hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        total_committed += hp->committed_size();
    }
#else
    total_committed = committed_size();
#endif //MULTIPLE_HEAPS

    return total_committed;
}

size_t gc_heap::committed_size (bool loh_p, size_t* allocated)
{
    int gen_number = (loh_p ? (max_generation + 1) : max_generation);
    generation* gen = generation_of (gen_number);
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
    size_t total_committed = 0;
    size_t total_allocated = 0;

    while (seg)
    {
        total_committed += heap_segment_committed (seg) - (uint8_t*)seg;
        total_allocated += heap_segment_allocated (seg) - (uint8_t*)seg;
        seg = heap_segment_next (seg);
    }

    *allocated = total_allocated;
    return total_committed;
}

void gc_heap::get_memory_info (uint32_t* memory_load,
                               uint64_t* available_physical,
                               uint64_t* available_page_file)
{
    GCToOSInterface::GetMemoryStatus(memory_load, available_physical, available_page_file);
}

void fire_mark_event (int heap_num, int root_type, size_t bytes_marked)
{
    dprintf (DT_LOG_0, ("-----------[%d]mark %d: %Id", heap_num, root_type, bytes_marked));
    FIRE_EVENT(GCMarkWithType, heap_num, root_type, bytes_marked);
}

// Returns TRUE if an overflow happened.
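// When the mark stack overflows during marking, the overflowed objects are only recorded by
// widening the [min_overflow_address, max_overflow_address] range. This routine re-walks that
// range (growing mark_stack_array first when the growth is worthwhile) and repeats until no
// further overflow is produced.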
BOOL gc_heap::process_mark_overflow(int condemned_gen_number)
{
    size_t last_promoted_bytes = promoted_bytes (heap_number);
    BOOL  overflow_p = FALSE;
recheck:
    if ((max_overflow_address != 0) ||
        (min_overflow_address != MAX_PTR))
    {
        overflow_p = TRUE;
        // Try to grow the array.
        size_t new_size =
            max (MARK_STACK_INITIAL_LENGTH, 2*mark_stack_array_length);

        if ((new_size * sizeof(mark)) > 100*1024)
        {
            size_t new_max_size = (get_total_heap_size() / 10) / sizeof(mark);

            new_size = min(new_max_size, new_size);
        }

        if ((mark_stack_array_length < new_size) &&
            ((new_size - mark_stack_array_length) > (mark_stack_array_length / 2)))
        {
            mark* tmp = new (nothrow) mark [new_size];
            if (tmp)
            {
                delete [] mark_stack_array;
                mark_stack_array = tmp;
                mark_stack_array_length = new_size;
            }
        }

        uint8_t*  min_add = min_overflow_address;
        uint8_t*  max_add = max_overflow_address;
        max_overflow_address = 0;
        min_overflow_address = MAX_PTR;
        process_mark_overflow_internal (condemned_gen_number, min_add, max_add);
        goto recheck;
    }

    size_t current_promoted_bytes = promoted_bytes (heap_number);

    if (current_promoted_bytes != last_promoted_bytes)
        fire_mark_event (heap_number, ETW::GC_ROOT_OVERFLOW, (current_promoted_bytes - last_promoted_bytes));
    return overflow_p;
}

void gc_heap::process_mark_overflow_internal (int condemned_gen_number,
                                              uint8_t* min_add, uint8_t* max_add)
{
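    // Re-mark through every marked object in [min_add, max_add] on the relevant heaps' small
    // object segments (and, for a full GC, the large object segments as well) so that any
    // references dropped when the mark stack overflowed are traced.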
#ifdef MULTIPLE_HEAPS
    int thread = heap_number;
#endif //MULTIPLE_HEAPS
    BOOL  full_p = (condemned_gen_number == max_generation);

    dprintf(3,("Processing Mark overflow [%Ix %Ix]", (size_t)min_add, (size_t)max_add));
#ifdef MULTIPLE_HEAPS
    for (int hi = 0; hi < n_heaps; hi++)
    {
        gc_heap*  hp = g_heaps [(heap_number + hi) % n_heaps];

#else
    {
        gc_heap*  hp = 0;

#endif //MULTIPLE_HEAPS
        BOOL small_object_segments = TRUE;
        int align_const = get_alignment_constant (small_object_segments);
        generation* gen = hp->generation_of (condemned_gen_number);
        heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));

        PREFIX_ASSUME(seg != NULL);
        uint8_t*  o = max (heap_segment_mem (seg), min_add);
        while (1)
        {
            uint8_t*  end = heap_segment_allocated (seg);

            while ((o < end) && (o <= max_add))
            {
                assert ((min_add <= o) && (max_add >= o));
                dprintf (3, ("considering %Ix", (size_t)o));
                if (marked (o))
                {
                    mark_through_object (o, TRUE THREAD_NUMBER_ARG);
                }

                o = o + Align (size (o), align_const);
            }

            if (( seg = heap_segment_next_in_range (seg)) == 0)
            {
                if (small_object_segments && full_p)
                {
                    small_object_segments = FALSE;
                    align_const = get_alignment_constant (small_object_segments);
                    seg = heap_segment_in_range (generation_start_segment (hp->generation_of (max_generation+1)));

                    PREFIX_ASSUME(seg != NULL);

                    o = max (heap_segment_mem (seg), min_add);
                    continue;
                }
                else
                {
                    break;
                }
            }
            else
            {
                o = max (heap_segment_mem (seg), min_add);
                continue;
            }
        }
    }
}

// Scanning dependent handles for promotion needs special handling. Because the primary holds a strong
// reference to the secondary (when the primary itself is reachable), promotions can cascade (the
// secondary of one handle may be, or may promote, the primary of another), so we might need to perform the
// promotion scan multiple times.
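// For example, if handle A's secondary object is also handle B's primary, promoting A's primary promotes
// A's secondary, which in turn makes B's secondary eligible for promotion on a later pass.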
// This helper encapsulates the logic to complete all dependent handle promotions when running a server GC. It
// also has the effect of processing any mark stack overflow.

#ifdef MULTIPLE_HEAPS
// When multiple heaps are enabled we must use a more complex algorithm in order to keep all the GC
// worker threads synchronized. The algorithms are sufficiently divergent that we have different
// implementations based on whether MULTIPLE_HEAPS is defined or not.
//
// Define some static variables used for synchronization in the method below. These should really be defined
// locally but MSVC complains when the VOLATILE macro is expanded into an instantiation of the Volatile class.
//
// A note about the synchronization used within this method. Communication between the worker threads is
// achieved via two shared booleans (defined below). These both act as latches that are transitioned only from
// false -> true by unsynchronized code. They are only read or reset to false by a single thread under the
// protection of a join.
static VOLATILE(BOOL) s_fUnpromotedHandles = FALSE;
static VOLATILE(BOOL) s_fUnscannedPromotions = FALSE;
static VOLATILE(BOOL) s_fScanRequired;
void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
{
    // Whenever we call this method there may have been preceding object promotions. So set
    // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
    // based on how the scanning proceeded).
    s_fUnscannedPromotions = TRUE;

    // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
    // the state of this thread's portion of the dependent handle table. That's because promotions on other
    // threads could cause handle promotions to become necessary here. Even if there are definitely no more
    // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
    // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
    // as all the others or they'll get out of step).
    while (true)
    {
        // The various worker threads are all currently racing in this code. We need to work out if at least
        // one of them thinks it has work to do this cycle. Each thread needs to rescan its portion of the
        // dependent handle table when both of the following conditions apply:
        //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
        //     object happens to correspond to a primary in one of our handles we might potentially have to
        //     promote the associated secondary).
        //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
        //
        // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
        // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
        // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
        // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
        // follows below. Note that we can't read this outside of the join since on any iteration apart from
        // the first, threads will be racing between reading this value and completing their previous
        // iteration's table scan.
        //
        // The second condition is tracked by the dependent handle code itself on a per worker thread basis
        // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
        // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
        // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
        // we're safely joined.
        if (GCScan::GcDhUnpromotedHandlesExist(sc))
            s_fUnpromotedHandles = TRUE;

        // Synchronize all the threads so we can read our state variables safely. The shared variable
        // s_fScanRequired, indicating whether we should scan the tables or terminate the loop, will be set by
        // a single thread inside the join.
        gc_t_join.join(this, gc_join_scan_dependent_handles);
        if (gc_t_join.joined())
        {
            // We're synchronized so it's safe to read our shared state variables. We update another shared
            // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
            // the loop. We scan if there has been at least one object promotion since last time and at least
            // one thread has a dependent handle table with a potential handle promotion possible.
            s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;

            // Reset our shared state variables (ready to be set again on this scan or with a good initial
            // value for the next call if we're terminating the loop).
            s_fUnscannedPromotions = FALSE;
            s_fUnpromotedHandles = FALSE;

            if (!s_fScanRequired)
            {
                // We're terminating the loop. Perform any last operations that require single threaded access.
                if (!initial_scan_p)
                {
                    // On the second invocation we reconcile all mark overflow ranges across the heaps. This can help
                    // load balance if some of the heaps have an abnormally large workload.
                    uint8_t* all_heaps_max = 0;
                    uint8_t* all_heaps_min = MAX_PTR;
                    int i;
                    for (i = 0; i < n_heaps; i++)
                    {
                        if (all_heaps_max < g_heaps[i]->max_overflow_address)
                            all_heaps_max = g_heaps[i]->max_overflow_address;
                        if (all_heaps_min > g_heaps[i]->min_overflow_address)
                            all_heaps_min = g_heaps[i]->min_overflow_address;
                    }
                    for (i = 0; i < n_heaps; i++)
                    {
                        g_heaps[i]->max_overflow_address = all_heaps_max;
                        g_heaps[i]->min_overflow_address = all_heaps_min;
                    }
                }
            }

            // Restart all the workers.
            dprintf(3, ("Starting all gc thread mark stack overflow processing"));
            gc_t_join.restart();
        }

        // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
        // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
        // global flag indicating that at least one object promotion may have occurred (the usual comment
        // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
        // exit the method since we unconditionally set this variable on method entry anyway).
        if (process_mark_overflow(condemned_gen_number))
            s_fUnscannedPromotions = TRUE;

        // If we decided that no scan was required we can terminate the loop now.
        if (!s_fScanRequired)
            break;

        // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
        // processed before we start scanning dependent handle tables (if overflows remain while we scan we
        // could miss noting the promotion of some primary objects).
        gc_t_join.join(this, gc_join_rescan_dependent_handles);
        if (gc_t_join.joined())
        {
            // Restart all the workers.
            dprintf(3, ("Starting all gc thread for dependent handle promotion"));
            gc_t_join.restart();
        }

        // If the portion of the dependent handle table managed by this worker has handles that could still be
        // promoted, perform a rescan. If the rescan resulted in at least one promotion, note this fact since it
        // could require a rescan of handles on this or other workers.
        if (GCScan::GcDhUnpromotedHandlesExist(sc))
            if (GCScan::GcDhReScan(sc))
                s_fUnscannedPromotions = TRUE;
    }
}
#else //MULTIPLE_HEAPS
// Non-multiple heap version of scan_dependent_handles: much simpler without the need to keep multiple worker
// threads synchronized.
void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p)
{
    UNREFERENCED_PARAMETER(initial_scan_p);

    // Whenever we call this method there may have been preceding object promotions. So set
    // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
    // based on how the scanning proceeded).
    bool fUnscannedPromotions = true;

    // Loop until there are either no more dependent handles that can have their secondary promoted or we've
    // managed to perform a scan without promoting anything new.
    while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
    {
        // On each iteration of the loop start with the assumption that no further objects have been promoted.
        fUnscannedPromotions = false;

        // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
        // being visible. If there was an overflow (process_mark_overflow returned true) then additional
        // objects now appear to be promoted and we should set the flag.
        if (process_mark_overflow(condemned_gen_number))
            fUnscannedPromotions = true;

        // Perform the scan and set the flag if any promotions resulted.
        if (GCScan::GcDhReScan(sc))
            fUnscannedPromotions = true;
    }

    // Process any mark stack overflow that may have resulted from scanning handles (or if we didn't need to
    // scan any handles at all this is the processing of overflows that may have occurred prior to this method
    // invocation).
    process_mark_overflow(condemned_gen_number);
}
#endif //MULTIPLE_HEAPS

void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
{
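    // Rough outline of this routine: per-generation bookkeeping is reset, then roots are marked
    // (sized ref handles for full GCs, thread stacks, the finalization queue, the handle table and,
    // for ephemeral GCs, cross-generation pointers found via the card table). Dependent handles are
    // then scanned to a fixed point, short weak references are nulled, objects needing finalization
    // are promoted, dependent handles are rescanned, long weak references and the sync block cache
    // are cleaned up, and finally the promotion decision for a non-promoting GC is revisited.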
    assert (settings.concurrent == FALSE);

    ScanContext sc;
    sc.thread_number = heap_number;
    sc.promotion = TRUE;
    sc.concurrent = FALSE;

    dprintf(2,("---- Mark Phase condemning %d ----", condemned_gen_number));
    BOOL  full_p = (condemned_gen_number == max_generation);

#ifdef TIME_GC
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#endif //TIME_GC

    int gen_to_init = condemned_gen_number;
    if (condemned_gen_number == max_generation)
    {
        gen_to_init = max_generation + 1;
    }
    for (int gen_idx = 0; gen_idx <= gen_to_init; gen_idx++)
    {
        dynamic_data* dd = dynamic_data_of (gen_idx);
        dd_begin_data_size (dd) = generation_size (gen_idx) -
                                   dd_fragmentation (dd) -
                                   Align (size (generation_allocation_start (generation_of (gen_idx))));
        dprintf (2, ("begin data size for gen%d is %Id", gen_idx, dd_begin_data_size (dd)));
        dd_survived_size (dd) = 0;
        dd_pinned_survived_size (dd) = 0;
        dd_artificial_pinned_survived_size (dd) = 0;
        dd_added_pinned_size (dd) = 0;
#ifdef SHORT_PLUGS
        dd_padding_size (dd) = 0;
#endif //SHORT_PLUGS
#if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
        dd_num_npinned_plugs (dd) = 0;
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN
    }

    if (gen0_must_clear_bricks > 0)
        gen0_must_clear_bricks--;

    size_t last_promoted_bytes = 0;

    promoted_bytes (heap_number) = 0;
    reset_mark_stack();

#ifdef SNOOP_STATS
    memset (&snoop_stat, 0, sizeof(snoop_stat));
    snoop_stat.heap_index = heap_number;
#endif //SNOOP_STATS

#ifdef MH_SC_MARK
    if (full_p)
    {
        //initialize the mark stack
        for (int i = 0; i < max_snoop_level; i++)
        {
            ((uint8_t**)(mark_stack_array))[i] = 0;
        }

        mark_stack_busy() = 1;
    }
#endif //MH_SC_MARK

    static uint32_t num_sizedrefs = 0;

#ifdef MH_SC_MARK
    static BOOL do_mark_steal_p = FALSE;
#endif //MH_SC_MARK

#ifdef FEATURE_CARD_MARKING_STEALING
    reset_card_marking_enumerators();
#endif // FEATURE_CARD_MARKING_STEALING

#ifdef MULTIPLE_HEAPS
    gc_t_join.join(this, gc_join_begin_mark_phase);
    if (gc_t_join.joined())
    {
#endif //MULTIPLE_HEAPS

        maxgen_size_inc_p = false;

        num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();

#ifdef MULTIPLE_HEAPS

#ifdef MH_SC_MARK
        if (full_p)
        {
            size_t total_heap_size = get_total_heap_size();

            if (total_heap_size > (100 * 1024 * 1024))
            {
                do_mark_steal_p = TRUE;
            }
            else
            {
                do_mark_steal_p = FALSE;
            }
        }
        else
        {
            do_mark_steal_p = FALSE;
        }
#endif //MH_SC_MARK

        gc_t_join.restart();
    }
#endif //MULTIPLE_HEAPS

    {

#ifdef MARK_LIST
        //set up the mark lists from g_mark_list
        assert (g_mark_list);
#ifdef MULTIPLE_HEAPS
        mark_list = &g_mark_list [heap_number*mark_list_size];
#else
        mark_list = g_mark_list;
#endif //MULTIPLE_HEAPS
        //don't use the mark list for full gc
        //because multiple segments are more complex to handle and the list
        //is likely to overflow
        if (condemned_gen_number != max_generation)
            mark_list_end = &mark_list [mark_list_size-1];
        else
            mark_list_end = &mark_list [0];
        mark_list_index = &mark_list [0];
#endif //MARK_LIST

#ifndef MULTIPLE_HEAPS
        shigh = (uint8_t*) 0;
        slow  = MAX_PTR;
#endif //MULTIPLE_HEAPS

        //%type%  category = quote (mark);

        if ((condemned_gen_number == max_generation) && (num_sizedrefs > 0))
        {
            GCScan::GcScanSizedRefs(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
            fire_mark_event (heap_number, ETW::GC_ROOT_SIZEDREF, (promoted_bytes (heap_number) - last_promoted_bytes));
            last_promoted_bytes = promoted_bytes (heap_number);

#ifdef MULTIPLE_HEAPS
            gc_t_join.join(this, gc_join_scan_sizedref_done);
            if (gc_t_join.joined())
            {
                dprintf(3, ("Done with marking all sized refs. Starting all gc thread for marking other strong roots"));
                gc_t_join.restart();
            }
#endif //MULTIPLE_HEAPS
        }

        dprintf(3,("Marking Roots"));

        GCScan::GcScanRoots(GCHeap::Promote,
                                condemned_gen_number, max_generation,
                                &sc);

        fire_mark_event (heap_number, ETW::GC_ROOT_STACK, (promoted_bytes (heap_number) - last_promoted_bytes));
        last_promoted_bytes = promoted_bytes (heap_number);

#ifdef BACKGROUND_GC
        if (recursive_gc_sync::background_running_p())
        {
            scan_background_roots (GCHeap::Promote, heap_number, &sc);
        }
#endif //BACKGROUND_GC

#ifdef FEATURE_PREMORTEM_FINALIZATION
        dprintf(3, ("Marking finalization data"));
        finalize_queue->GcScanRoots(GCHeap::Promote, heap_number, 0);
#endif // FEATURE_PREMORTEM_FINALIZATION

        fire_mark_event (heap_number, ETW::GC_ROOT_FQ, (promoted_bytes (heap_number) - last_promoted_bytes));
        last_promoted_bytes = promoted_bytes (heap_number);

// MTHTS
        {

            dprintf(3,("Marking handle table"));
            GCScan::GcScanHandles(GCHeap::Promote,
                                      condemned_gen_number, max_generation,
                                      &sc);
            fire_mark_event (heap_number, ETW::GC_ROOT_HANDLES, (promoted_bytes (heap_number) - last_promoted_bytes));
            last_promoted_bytes = promoted_bytes (heap_number);
        }

#ifdef TRACE_GC
        size_t promoted_before_cards = promoted_bytes (heap_number);
#endif //TRACE_GC

        dprintf (3, ("before cards: %Id", promoted_before_cards));
        if (!full_p)
        {
#ifdef CARD_BUNDLE
#ifdef MULTIPLE_HEAPS
            if (gc_t_join.r_join(this, gc_r_join_update_card_bundle))
            {
#endif //MULTIPLE_HEAPS

#ifndef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
                // If we are manually managing card bundles, every write to the card table should already be
                // accounted for in the card bundle table so there's nothing to update here.
                update_card_table_bundle();
#endif
                if (card_bundles_enabled())
                {
                    verify_card_bundles();
                }

#ifdef MULTIPLE_HEAPS
                gc_t_join.r_restart();
            }
#endif //MULTIPLE_HEAPS
#endif //CARD_BUNDLE

            card_fn mark_object_fn = &gc_heap::mark_object_simple;
#ifdef HEAP_ANALYZE
            heap_analyze_success = TRUE;
            if (heap_analyze_enabled)
            {
                internal_root_array_index = 0;
                current_obj = 0;
                current_obj_size = 0;
                mark_object_fn = &gc_heap::ha_mark_object_simple;
            }
#endif //HEAP_ANALYZE

#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
            if (!card_mark_done_soh)
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
            {
                dprintf (3, ("Marking cross generation pointers on heap %d", heap_number));
                mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG);
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
                card_mark_done_soh = true;
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
            }

#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
            if (!card_mark_done_loh)
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
            {
                dprintf (3, ("Marking cross generation pointers for large objects on heap %d", heap_number));
                mark_through_cards_for_large_objects(mark_object_fn, FALSE THIS_ARG);
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
                card_mark_done_loh = true;
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
            }

#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
            // check the other heaps cyclically and try to help out where the marking isn't done
            for (int i = 0; i < gc_heap::n_heaps; i++)
            {
                int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps;
                gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at];
                if (!hp->card_mark_done_soh)
                {
                    dprintf(3, ("Marking cross generation pointers on heap %d", hp->heap_number));
                    hp->mark_through_cards_for_segments(mark_object_fn, FALSE THIS_ARG);
                    hp->card_mark_done_soh = true;
                }

                if (!hp->card_mark_done_loh)
                {
                    dprintf(3, ("Marking cross generation pointers for large objects on heap %d", hp->heap_number));
                    hp->mark_through_cards_for_large_objects(mark_object_fn, FALSE THIS_ARG);
                    hp->card_mark_done_loh = true;
                }
            }
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING

            dprintf (3, ("marked by cards: %Id",
                (promoted_bytes (heap_number) - promoted_before_cards)));
            fire_mark_event (heap_number, ETW::GC_ROOT_OLDER, (promoted_bytes (heap_number) - last_promoted_bytes));
            last_promoted_bytes = promoted_bytes (heap_number);
        }
    }

#ifdef MH_SC_MARK
    if (do_mark_steal_p)
    {
        mark_steal();
    }
#endif //MH_SC_MARK

    // Dependent handles need to be scanned with a special algorithm (see the header comment on
    // scan_dependent_handles for more detail). We perform an initial scan without synchronizing with other
    // worker threads or processing any mark stack overflow. This is not guaranteed to complete the operation
    // but in a common case (where there are no dependent handles that are due to be collected) it allows us
    // to optimize away further scans. The call to scan_dependent_handles is what will cycle through more
    // iterations if required and will also perform processing of any mark stack overflow once the dependent
    // handle table has been fully promoted.
    GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
    scan_dependent_handles(condemned_gen_number, &sc, true);

#ifdef MULTIPLE_HEAPS
    dprintf(3, ("Joining for short weak handle scan"));
    gc_t_join.join(this, gc_join_null_dead_short_weak);
    if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
#ifdef HEAP_ANALYZE
        heap_analyze_enabled = FALSE;
        GCToEEInterface::AnalyzeSurvivorsFinished(condemned_gen_number);
#endif // HEAP_ANALYZE
        GCToEEInterface::AfterGcScanRoots (condemned_gen_number, max_generation, &sc);

#ifdef MULTIPLE_HEAPS
        if (!full_p)
        {
            // we used r_join and need to reinitialize states for it here.
            gc_t_join.r_init();
        }

        //start all threads on the roots.
        dprintf(3, ("Starting all gc thread for short weak handle scan"));
        gc_t_join.restart();
#endif //MULTIPLE_HEAPS

    }

#ifdef FEATURE_CARD_MARKING_STEALING
    reset_card_marking_enumerators();
#endif // FEATURE_CARD_MARKING_STEALING

    // null out the targets of short weakrefs that were not promoted.
    GCScan::GcShortWeakPtrScan(GCHeap::Promote, condemned_gen_number, max_generation,&sc);

// MTHTS: keep by single thread
#ifdef MULTIPLE_HEAPS
    dprintf(3, ("Joining for finalization"));
    gc_t_join.join(this, gc_join_scan_finalization);
    if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS

    {
#ifdef MULTIPLE_HEAPS
        //start all threads on the roots.
        dprintf(3, ("Starting all gc thread for Finalization"));
        gc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    //Handle finalization.
    size_t promoted_bytes_live = promoted_bytes (heap_number);

#ifdef FEATURE_PREMORTEM_FINALIZATION
    dprintf (3, ("Finalize marking"));
    finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this);

    GCToEEInterface::DiagWalkFReachableObjects(__this);
#endif // FEATURE_PREMORTEM_FINALIZATION

    // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
    // for finalization. As before scan_dependent_handles will also process any mark stack overflow.
    scan_dependent_handles(condemned_gen_number, &sc, false);

#ifdef MULTIPLE_HEAPS
    dprintf(3, ("Joining for weak pointer deletion"));
    gc_t_join.join(this, gc_join_null_dead_long_weak);
    if (gc_t_join.joined())
    {
        //start all threads on the roots.
        dprintf(3, ("Starting all gc thread for weak pointer deletion"));
        gc_t_join.restart();
    }
#endif //MULTIPLE_HEAPS

    // null out the targets of long weakrefs that were not promoted.
    GCScan::GcWeakPtrScan (GCHeap::Promote, condemned_gen_number, max_generation, &sc);

// MTHTS: keep by single thread
#ifdef MULTIPLE_HEAPS
#ifdef MARK_LIST
#ifdef PARALLEL_MARK_LIST_SORT
//    unsigned long start = GetCycleCount32();
    sort_mark_list();
//    printf("sort_mark_list took %u cycles\n", GetCycleCount32() - start);
#endif //PARALLEL_MARK_LIST_SORT
#endif //MARK_LIST

    dprintf (3, ("Joining for sync block cache entry scanning"));
    gc_t_join.join(this, gc_join_null_dead_syncblk);
    if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
        // scan for deleted entries in the syncblk cache
        GCScan::GcWeakPtrScanBySingleThread (condemned_gen_number, max_generation, &sc);

#ifdef MULTIPLE_HEAPS

#ifdef MARK_LIST
#ifndef PARALLEL_MARK_LIST_SORT
        //compact g_mark_list and sort it.
        combine_mark_lists();
#endif //PARALLEL_MARK_LIST_SORT
#endif //MARK_LIST

        //decide on promotion
        if (!settings.promotion)
        {
            size_t m = 0;
            for (int n = 0; n <= condemned_gen_number;n++)
            {
                m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.1);
            }

            for (int i = 0; i < n_heaps; i++)
            {
                dynamic_data* dd = g_heaps[i]->dynamic_data_of (min (condemned_gen_number +1,
                                                                     max_generation));
                size_t older_gen_size = (dd_current_size (dd) +
                                         (dd_desired_allocation (dd) -
                                         dd_new_allocation (dd)));

                if ((m > (older_gen_size)) ||
                    (promoted_bytes (i) > m))
                {
                    settings.promotion = TRUE;
                }
            }
        }

#ifdef SNOOP_STATS
        if (do_mark_steal_p)
        {
            size_t objects_checked_count = 0;
            size_t zero_ref_count = 0;
            size_t objects_marked_count = 0;
            size_t check_level_count = 0;
            size_t busy_count = 0;
            size_t interlocked_count = 0;
            size_t partial_mark_parent_count = 0;
            size_t stolen_or_pm_count = 0;
            size_t stolen_entry_count = 0;
            size_t pm_not_ready_count = 0;
            size_t normal_count = 0;
            size_t stack_bottom_clear_count = 0;

            for (int i = 0; i < n_heaps; i++)
            {
                gc_heap* hp = g_heaps[i];
                hp->print_snoop_stat();
                objects_checked_count += hp->snoop_stat.objects_checked_count;
                zero_ref_count += hp->snoop_stat.zero_ref_count;
                objects_marked_count += hp->snoop_stat.objects_marked_count;
                check_level_count += hp->snoop_stat.check_level_count;
                busy_count += hp->snoop_stat.busy_count;
                interlocked_count += hp->snoop_stat.interlocked_count;
                partial_mark_parent_count += hp->snoop_stat.partial_mark_parent_count;
                stolen_or_pm_count += hp->snoop_stat.stolen_or_pm_count;
                stolen_entry_count += hp->snoop_stat.stolen_entry_count;
                pm_not_ready_count += hp->snoop_stat.pm_not_ready_count;
                normal_count += hp->snoop_stat.normal_count;
                stack_bottom_clear_count += hp->snoop_stat.stack_bottom_clear_count;
            }

            fflush (stdout);

            printf ("-------total stats-------\n");
            printf ("%8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s | %8s\n",
                "checked", "zero", "marked", "level", "busy", "xchg", "pmparent", "s_pm", "stolen", "nready", "normal", "clear");
            printf ("%8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d | %8d\n",
                objects_checked_count,
                zero_ref_count,
                objects_marked_count,
                check_level_count,
                busy_count,
                interlocked_count,
                partial_mark_parent_count,
                stolen_or_pm_count,
                stolen_entry_count,
                pm_not_ready_count,
                normal_count,
                stack_bottom_clear_count);
        }
#endif //SNOOP_STATS

        //start all threads.
        dprintf(3, ("Starting all threads for end of mark phase"));
        gc_t_join.restart();
#else //MULTIPLE_HEAPS

        //decide on promotion
        if (!settings.promotion)
        {
            size_t m = 0;
            for (int n = 0; n <= condemned_gen_number;n++)
            {
                m +=  (size_t)(dd_min_size (dynamic_data_of (n))*(n+1)*0.06);
            }
            dynamic_data* dd = dynamic_data_of (min (condemned_gen_number +1,
                                                     max_generation));
            size_t older_gen_size = (dd_current_size (dd) +
                                     (dd_desired_allocation (dd) -
                                     dd_new_allocation (dd)));

            dprintf (2, ("promotion threshold: %Id, promoted bytes: %Id size n+1: %Id",
                         m, promoted_bytes (heap_number), older_gen_size));

            if ((m > older_gen_size) ||
                    (promoted_bytes (heap_number) > m))
            {
                settings.promotion = TRUE;
            }
        }

#endif //MULTIPLE_HEAPS
    }

#ifdef MULTIPLE_HEAPS
#ifdef MARK_LIST
#ifdef PARALLEL_MARK_LIST_SORT
//    start = GetCycleCount32();
    merge_mark_lists();
//    printf("merge_mark_lists took %u cycles\n", GetCycleCount32() - start);
#endif //PARALLEL_MARK_LIST_SORT
#endif //MARK_LIST
#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
    total_promoted_bytes = promoted_bytes (heap_number);
#endif //BACKGROUND_GC

    promoted_bytes (heap_number) -= promoted_bytes_live;

#ifdef TIME_GC
        finish = GetCycleCount32();
        mark_time = finish - start;
#endif //TIME_GC

    dprintf(2,("---- End of mark phase ----"));
}

inline
void gc_heap::pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high)
{
    dprintf (3, ("Pinning %Ix", (size_t)o));
    if ((o >= low) && (o < high))
    {
        dprintf(3,("^%Ix^", (size_t)o));
        set_pinned (o);

#ifdef FEATURE_EVENT_TRACE
        if(EVENT_ENABLED(PinObjectAtGCTime))
        {
            fire_etw_pin_object_event(o, ppObject);
        }
#endif // FEATURE_EVENT_TRACE

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
        num_pinned_objects++;
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE
    }
}

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
size_t gc_heap::get_total_pinned_objects()
{
#ifdef MULTIPLE_HEAPS
    size_t total_num_pinned_objects = 0;
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        total_num_pinned_objects += hp->num_pinned_objects;
    }
    return total_num_pinned_objects;
#else //MULTIPLE_HEAPS
    return num_pinned_objects;
#endif //MULTIPLE_HEAPS
}
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

void gc_heap::reset_mark_stack ()
{
    reset_pinned_queue();
    max_overflow_address = 0;
    min_overflow_address = MAX_PTR;
}

#ifdef FEATURE_STRUCTALIGN
//
// The word with left child, right child, and align info is laid out as follows:
//
//      |   upper short word   |   lower short word   |
//      |<------------> <----->|<------------> <----->|
//      |  left child   info hi| right child   info lo|
// x86: |    10 bits     6 bits|   10 bits      6 bits|
//
// where left/right child are signed values and concat(info hi, info lo) is unsigned.
//
// The "align info" encodes two numbers: the required alignment (a power of two)
// and the misalignment (the number of machine words the destination address needs
// to be adjusted by to provide alignment - so this number is always smaller than
// the required alignment).  Thus, the two can be represented as the "logical or"
// of the two numbers.  Note that the actual pad is computed from the misalignment
// by adding the alignment iff the misalignment is non-zero and less than min_obj_size.
//
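// For illustration only (assuming DATA_ALIGNMENT == 8 and the x64 field widths above): calling
// set_node_aligninfo with requiredAlignment = 16 and pad = 8 stores aligninfo = 16 + 8 = 24,
// i.e. 24 / DATA_ALIGNMENT = 3, split across the info hi/lo fields; node_aligninfo then recovers
// 16 as the highest set bit of 24 and 8 as the remainder (before the min-pad adjustment).
//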

// The number of bits in a brick.
#if defined (_TARGET_AMD64_)
#define brick_bits (12)
#else
#define brick_bits (11)
#endif //_TARGET_AMD64_
C_ASSERT(brick_size == (1 << brick_bits));

// The number of bits needed to represent the offset to a child node.
// "brick_bits + 1" allows us to represent a signed offset within a brick.
#define child_bits (brick_bits + 1 - LOG2_PTRSIZE)

// The number of bits in each of the pad hi, pad lo fields.
#define pad_bits (sizeof(short) * 8 - child_bits)

#define child_from_short(w) (((signed short)(w) / (1 << (pad_bits - LOG2_PTRSIZE))) & ~((1 << LOG2_PTRSIZE) - 1))
#define pad_mask ((1 << pad_bits) - 1)
#define pad_from_short(w) ((size_t)(w) & pad_mask)
#else // FEATURE_STRUCTALIGN
#define child_from_short(w) (w)
#endif // FEATURE_STRUCTALIGN

inline
short node_left_child(uint8_t* node)
{
    return child_from_short(((plug_and_pair*)node)[-1].m_pair.left);
}

inline
void set_node_left_child(uint8_t* node, ptrdiff_t val)
{
    assert (val > -(ptrdiff_t)brick_size);
    assert (val < (ptrdiff_t)brick_size);
    assert (Aligned (val));
#ifdef FEATURE_STRUCTALIGN
    size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.left);
    ((plug_and_pair*)node)[-1].m_pair.left = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
#else // FEATURE_STRUCTALIGN
    ((plug_and_pair*)node)[-1].m_pair.left = (short)val;
#endif // FEATURE_STRUCTALIGN
    assert (node_left_child (node) == val);
}

inline
short node_right_child(uint8_t* node)
{
    return child_from_short(((plug_and_pair*)node)[-1].m_pair.right);
}

inline
void set_node_right_child(uint8_t* node, ptrdiff_t val)
{
    assert (val > -(ptrdiff_t)brick_size);
    assert (val < (ptrdiff_t)brick_size);
    assert (Aligned (val));
#ifdef FEATURE_STRUCTALIGN
    size_t pad = pad_from_short(((plug_and_pair*)node)[-1].m_pair.right);
    ((plug_and_pair*)node)[-1].m_pair.right = ((short)val << (pad_bits - LOG2_PTRSIZE)) | (short)pad;
#else // FEATURE_STRUCTALIGN
    ((plug_and_pair*)node)[-1].m_pair.right = (short)val;
#endif // FEATURE_STRUCTALIGN
    assert (node_right_child (node) == val);
}

#ifdef FEATURE_STRUCTALIGN
void node_aligninfo (uint8_t* node, int& requiredAlignment, ptrdiff_t& pad)
{
    // Extract the single-number aligninfo from the fields.
    short left = ((plug_and_pair*)node)[-1].m_pair.left;
    short right = ((plug_and_pair*)node)[-1].m_pair.right;
    ptrdiff_t pad_shifted = (pad_from_short(left) << pad_bits) | pad_from_short(right);
    ptrdiff_t aligninfo = pad_shifted * DATA_ALIGNMENT;

    // Replicate the topmost bit into all lower bits.
    ptrdiff_t x = aligninfo;
    x |= x >> 8;
    x |= x >> 4;
    x |= x >> 2;
    x |= x >> 1;

    // Clear all bits but the highest.
    requiredAlignment = (int)(x ^ (x >> 1));
    pad = aligninfo - requiredAlignment;
    pad += AdjustmentForMinPadSize(pad, requiredAlignment);
}

inline
ptrdiff_t node_alignpad (uint8_t* node)
{
    int requiredAlignment;
    ptrdiff_t alignpad;
    node_aligninfo (node, requiredAlignment, alignpad);
    return alignpad;
}

void clear_node_aligninfo (uint8_t* node)
{
    ((plug_and_pair*)node)[-1].m_pair.left &= ~0 << pad_bits;
    ((plug_and_pair*)node)[-1].m_pair.right &= ~0 << pad_bits;
}

void set_node_aligninfo (uint8_t* node, int requiredAlignment, ptrdiff_t pad)
{
    // Encode the alignment requirement and alignment offset as a single number
    // as described above.
    ptrdiff_t aligninfo = (size_t)requiredAlignment + (pad & (requiredAlignment-1));
    assert (Aligned (aligninfo));
    ptrdiff_t aligninfo_shifted = aligninfo / DATA_ALIGNMENT;
    assert (aligninfo_shifted < (1 << (pad_bits + pad_bits)));

    ptrdiff_t hi = aligninfo_shifted >> pad_bits;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.left) == 0);
    ((plug_and_pair*)node)[-1].m_pair.left |= hi;

    ptrdiff_t lo = aligninfo_shifted & pad_mask;
    assert (pad_from_short(((plug_and_gap*)node)[-1].m_pair.right) == 0);
    ((plug_and_pair*)node)[-1].m_pair.right |= lo;

#ifdef _DEBUG
    int requiredAlignment2;
    ptrdiff_t pad2;
    node_aligninfo (node, requiredAlignment2, pad2);
    assert (requiredAlignment == requiredAlignment2);
    assert (pad == pad2);
#endif // _DEBUG
}
#endif // FEATURE_STRUCTALIGN

inline
void loh_set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    ptrdiff_t* place = &(((loh_obj_and_pad*)node)[-1].reloc);
    *place = val;
}

inline
ptrdiff_t loh_node_relocation_distance(uint8_t* node)
{
    return (((loh_obj_and_pad*)node)[-1].reloc);
}
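
// In the plug_and_reloc header the reloc field packs three things: bit 0 records whether the
// plug had to be realigned, bit 1 is the "left" flag tested by node_left_p, and the remaining
// bits hold the relocation distance itself (which is asserted to have its low two bits clear).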

inline
ptrdiff_t node_relocation_distance (uint8_t* node)
{
    return (((plug_and_reloc*)(node))[-1].reloc & ~3);
}

inline
void set_node_relocation_distance(uint8_t* node, ptrdiff_t val)
{
    assert (val == (val & ~3));
    ptrdiff_t* place = &(((plug_and_reloc*)node)[-1].reloc);
    //clear the left bit and the relocation field
    *place &= 1;
    // store the value
    *place |= val;
}

#define node_left_p(node) (((plug_and_reloc*)(node))[-1].reloc & 2)

#define set_node_left(node) ((plug_and_reloc*)(node))[-1].reloc |= 2;

#ifndef FEATURE_STRUCTALIGN
void set_node_realigned(uint8_t* node)
{
    ((plug_and_reloc*)(node))[-1].reloc |= 1;
}

void clear_node_realigned(uint8_t* node)
{
#ifdef RESPECT_LARGE_ALIGNMENT
    ((plug_and_reloc*)(node))[-1].reloc &= ~1;
#else
    UNREFERENCED_PARAMETER(node);
#endif //RESPECT_LARGE_ALIGNMENT
}
#endif // FEATURE_STRUCTALIGN

inline
size_t  node_gap_size (uint8_t* node)
{
    return ((plug_and_gap *)node)[-1].gap;
}

void set_gap_size (uint8_t* node, size_t size)
{
    assert (Aligned (size));

    // clear the reloc and lr fields used by the node.
    ((plug_and_gap *)node)[-1].reloc = 0;
    ((plug_and_gap *)node)[-1].lr =0;
    ((plug_and_gap *)node)[-1].gap = size;

    assert ((size == 0 )||(size >= sizeof(plug_and_reloc)));

}

uint8_t* gc_heap::insert_node (uint8_t* new_node, size_t sequence_number,
                   uint8_t* tree, uint8_t* last_node)
{
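    // Thread the new plug into the binary tree for this brick, encoded via the left/right child
    // offsets stored just before each plug. The shape depends only on sequence_number: a power of
    // two makes the new node the new root (with the old tree as its left child), an odd sequence
    // number hangs the node off the previously inserted one as a right child, and the remaining
    // nodes are spliced into the right spine of the tree.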
    dprintf (3, ("IN: %Ix(%Ix), T: %Ix(%Ix), L: %Ix(%Ix) [%Ix]",
                 (size_t)new_node, brick_of(new_node),
                 (size_t)tree, brick_of(tree),
                 (size_t)last_node, brick_of(last_node),
                 sequence_number));
    if (power_of_two_p (sequence_number))
    {
        set_node_left_child (new_node, (tree - new_node));
        dprintf (3, ("NT: %Ix, LC->%Ix", (size_t)new_node, (tree - new_node)));
        tree = new_node;
    }
    else
    {
        if (oddp (sequence_number))
        {
            set_node_right_child (last_node, (new_node - last_node));
            dprintf (3, ("%Ix RC->%Ix", last_node, (new_node - last_node)));
        }
        else
        {
            uint8_t*  earlier_node = tree;
            size_t imax = logcount(sequence_number) - 2;
            for (size_t i = 0; i != imax; i++)
            {
                earlier_node = earlier_node + node_right_child (earlier_node);
            }
            int tmp_offset = node_right_child (earlier_node);
            assert (tmp_offset); // should never be empty
            set_node_left_child (new_node, ((earlier_node + tmp_offset ) - new_node));
            set_node_right_child (earlier_node, (new_node - earlier_node));

            dprintf (3, ("%Ix LC->%Ix, %Ix RC->%Ix",
                new_node, ((earlier_node + tmp_offset ) - new_node),
                earlier_node, (new_node - earlier_node)));
        }
    }
    return tree;
}

size_t gc_heap::update_brick_table (uint8_t* tree, size_t current_brick,
                                    uint8_t* x, uint8_t* plug_end)
{
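    // Record the tree for the brick just finished and fill in the bricks it spans: current_brick
    // gets the offset of the tree within the brick (or -1 if there is no tree), the following
    // bricks up to the one containing plug_end-1 get decreasing negative values (-1, -2, ...) so a
    // lookup can walk back to the brick that holds the tree, and any remaining bricks up to the
    // brick containing x-1 are marked empty with -1. Returns the brick of x, the new current brick.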
    dprintf (3, ("tree: %Ix, current b: %Ix, x: %Ix, plug_end: %Ix",
        tree, current_brick, x, plug_end));

    if (tree != NULL)
    {
        dprintf (3, ("b- %Ix->%Ix pointing to tree %Ix",
            current_brick, (size_t)(tree - brick_address (current_brick)), tree));
        set_brick (current_brick, (tree - brick_address (current_brick)));
    }
    else
    {
        dprintf (3, ("b- %Ix->-1", current_brick));
        set_brick (current_brick, -1);
    }
    size_t  b = 1 + current_brick;
    ptrdiff_t  offset = 0;
    size_t last_br = brick_of (plug_end-1);
    current_brick = brick_of (x-1);
    dprintf (3, ("ubt: %Ix->%Ix]->%Ix]", b, last_br, current_brick));
    while (b <= current_brick)
    {
        if (b <= last_br)
        {
            set_brick (b, --offset);
        }
        else
        {
            set_brick (b,-1);
        }
        b++;
    }
    return brick_of (x);
}

void gc_heap::plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate)
{
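    // Plan the allocation start object for gen at the consing generation's current allocation
    // pointer. The start object is Align (min_obj_size) bytes; if the space left before the next
    // plug (or the allocation limit) is smaller than a minimal object, that remainder is folded
    // into the start object's size so no unusable gap is left behind.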
#ifdef BIT64
    // We should never demote big plugs to gen0.
    if (gen == youngest_generation)
    {
        heap_segment* seg = ephemeral_heap_segment;
        size_t mark_stack_large_bos = mark_stack_bos;
        size_t large_plug_pos = 0;
        while (mark_stack_large_bos < mark_stack_tos)
        {
            if (mark_stack_array[mark_stack_large_bos].len > demotion_plug_len_th)
            {
                while (mark_stack_bos <= mark_stack_large_bos)
                {
                    size_t entry = deque_pinned_plug();
                    size_t len = pinned_len (pinned_plug_of (entry));
                    uint8_t* plug = pinned_plug (pinned_plug_of(entry));
                    if (len > demotion_plug_len_th)
                    {
                        dprintf (2, ("ps(%d): S %Ix (%Id)(%Ix)", gen->gen_num, plug, len, (plug+len)));
                    }
                    pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (consing_gen);
                    assert(mark_stack_array[entry].len == 0 ||
                            mark_stack_array[entry].len >= Align(min_obj_size));
                    generation_allocation_pointer (consing_gen) = plug + len;
                    generation_allocation_limit (consing_gen) = heap_segment_plan_allocated (seg);
                    set_allocator_next_pin (consing_gen);
                }
            }

            mark_stack_large_bos++;
        }
    }
#endif // BIT64

    generation_plan_allocation_start (gen) =
        allocate_in_condemned_generations (consing_gen, Align (min_obj_size), -1);
    generation_plan_allocation_start_size (gen) = Align (min_obj_size);
    size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
    if (next_plug_to_allocate)
    {
        size_t dist_to_next_plug = (size_t)(next_plug_to_allocate - generation_allocation_pointer (consing_gen));
        if (allocation_left > dist_to_next_plug)
        {
            allocation_left = dist_to_next_plug;
        }
    }
    if (allocation_left < Align (min_obj_size))
    {
        generation_plan_allocation_start_size (gen) += allocation_left;
        generation_allocation_pointer (consing_gen) += allocation_left;
    }

    dprintf (2, ("plan alloc gen%d(%Ix) start at %Ix (ptr: %Ix, limit: %Ix, next: %Ix)", gen->gen_num,
        generation_plan_allocation_start (gen),
        generation_plan_allocation_start_size (gen),
        generation_allocation_pointer (consing_gen), generation_allocation_limit (consing_gen),
        next_plug_to_allocate));
}

void gc_heap::realloc_plan_generation_start (generation* gen, generation* consing_gen)
{
    BOOL adjacentp = FALSE;

    generation_plan_allocation_start (gen) =
        allocate_in_expanded_heap (consing_gen, Align(min_obj_size), adjacentp, 0,
#ifdef SHORT_PLUGS
                                   FALSE, NULL,
#endif //SHORT_PLUGS
                                   FALSE, -1 REQD_ALIGN_AND_OFFSET_ARG);

    generation_plan_allocation_start_size (gen) = Align (min_obj_size);
    size_t allocation_left = (size_t)(generation_allocation_limit (consing_gen) - generation_allocation_pointer (consing_gen));
    if ((allocation_left < Align (min_obj_size)) &&
         (generation_allocation_limit (consing_gen)!=heap_segment_plan_allocated (generation_allocation_segment (consing_gen))))
    {
        generation_plan_allocation_start_size (gen) += allocation_left;
        generation_allocation_pointer (consing_gen) += allocation_left;
    }

    dprintf (1, ("plan re-alloc gen%d start at %Ix (ptr: %Ix, limit: %Ix)", gen->gen_num,
        generation_plan_allocation_start (consing_gen),
        generation_allocation_pointer (consing_gen),
        generation_allocation_limit (consing_gen)));
}

void gc_heap::plan_generation_starts (generation*& consing_gen)
{
    //make sure that every generation has a planned allocation start
    int  gen_number = settings.condemned_generation;
    while (gen_number >= 0)
    {
        if (gen_number < max_generation)
        {
            consing_gen = ensure_ephemeral_heap_segment (consing_gen);
        }
        generation* gen = generation_of (gen_number);
        if (0 == generation_plan_allocation_start (gen))
        {
            plan_generation_start (gen, consing_gen, 0);
            assert (generation_plan_allocation_start (gen));
        }
        gen_number--;
    }
    // now we know the planned allocation size
    heap_segment_plan_allocated (ephemeral_heap_segment) =
        generation_allocation_pointer (consing_gen);
}

void gc_heap::advance_pins_for_demotion (generation* gen)
{
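    // If the pinned plugs still queued for gen1 make up a large enough fraction both of the space
    // they force us to skip and of gen1's survivors (the 0.15 / 0.30 thresholds below), dequeue
    // the pins that lie below the original gen0 start now, advancing the consing generation's
    // allocation pointer past each one and updating the pinned allocation accounting of the
    // generation each plug plans into.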
    uint8_t* original_youngest_start = generation_allocation_start (youngest_generation);
    heap_segment* seg = ephemeral_heap_segment;

    if ((!(pinned_plug_que_empty_p())))
    {
        size_t gen1_pinned_promoted = generation_pinned_allocation_compact_size (generation_of (max_generation));
        size_t gen1_pins_left = dd_pinned_survived_size (dynamic_data_of (max_generation - 1)) - gen1_pinned_promoted;
        size_t total_space_to_skip = last_gen1_pin_end - generation_allocation_pointer (gen);
        float pin_frag_ratio = (float)gen1_pins_left / (float)total_space_to_skip;
        float pin_surv_ratio = (float)gen1_pins_left / (float)(dd_survived_size (dynamic_data_of (max_generation - 1)));
        if ((pin_frag_ratio > 0.15) && (pin_surv_ratio > 0.30))
        {
            while (!pinned_plug_que_empty_p() &&
                    (pinned_plug (oldest_pin()) < original_youngest_start))
            {
                size_t entry = deque_pinned_plug();
                size_t len = pinned_len (pinned_plug_of (entry));
                uint8_t* plug = pinned_plug (pinned_plug_of(entry));
                pinned_len (pinned_plug_of (entry)) = plug - generation_allocation_pointer (gen);
                assert(mark_stack_array[entry].len == 0 ||
                        mark_stack_array[entry].len >= Align(min_obj_size));
                generation_allocation_pointer (gen) = plug + len;
                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                set_allocator_next_pin (gen);

                //Add the size of the pinned plug to the right pinned allocations
                //find out which gen this pinned plug came from
                int frgn = object_gennum (plug);
                if ((frgn != (int)max_generation) && settings.promotion)
                {
                    int togn = object_gennum_plan (plug);
                    generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
                    if (frgn < togn)
                    {
                        generation_pinned_allocation_compact_size (generation_of (togn)) += len;
                    }
                }

                dprintf (2, ("skipping gap %d, pin %Ix (%Id)",
                    pinned_len (pinned_plug_of (entry)), plug, len));
            }
        }
        dprintf (2, ("ad_p_d: PL: %Id, SL: %Id, pfr: %d, psr: %d",
            gen1_pins_left, total_space_to_skip, (int)(pin_frag_ratio*100), (int)(pin_surv_ratio*100)));
    }
}

void gc_heap::process_ephemeral_boundaries (uint8_t* x,
                                            int& active_new_gen_number,
                                            int& active_old_gen_number,
                                            generation*& consing_gen,
                                            BOOL& allocate_in_condemned)
{
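    // Roughly: called from the plan phase as the plan pointer x crosses the allocation start of
    // the next older generation. Steps active_old_gen_number/active_new_gen_number down and, when
    // gen1 planning is reached, skips any queued pins that live outside the ephemeral segment
    // (fixing up the consing generation's segment and pointer along the way), switches to
    // allocating in the condemned generations, and plans the start of the newly active generation.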
retry:
    if ((active_old_gen_number > 0) &&
        (x >= generation_allocation_start (generation_of (active_old_gen_number - 1))))
    {
        dprintf (2, ("crossing gen%d, x is %Ix", active_old_gen_number - 1, x));

        if (!pinned_plug_que_empty_p())
        {
            dprintf (2, ("oldest pin: %Ix(%Id)",
                pinned_plug (oldest_pin()),
                (x - pinned_plug (oldest_pin()))));
        }

        if (active_old_gen_number <= (settings.promotion ? (max_generation - 1) : max_generation))
        {
            active_new_gen_number--;
        }

        active_old_gen_number--;
        assert ((!settings.promotion) || (active_new_gen_number>0));

        if (active_new_gen_number == (max_generation - 1))
        {
#ifdef FREE_USAGE_STATS
            if (settings.condemned_generation == max_generation)
            {
                // We need to do this before we skip the rest of the pinned plugs.
                generation* gen_2 = generation_of (max_generation);
                generation* gen_1 = generation_of (max_generation - 1);

                size_t total_num_pinned_free_spaces_left = 0;

                // We are about to allocate gen1; check how efficient fitting into gen2 pinned free spaces has been.
                for (int j = 0; j < NUM_GEN_POWER2; j++)
                {
                    dprintf (1, ("[h%d][#%Id]2^%d: current: %Id, S: 2: %Id, 1: %Id(%Id)",
                        heap_number,
                        settings.gc_index,
                        (j + 10),
                        gen_2->gen_current_pinned_free_spaces[j],
                        gen_2->gen_plugs[j], gen_1->gen_plugs[j],
                        (gen_2->gen_plugs[j] + gen_1->gen_plugs[j])));

                    total_num_pinned_free_spaces_left += gen_2->gen_current_pinned_free_spaces[j];
                }

                float pinned_free_list_efficiency = 0;
                size_t total_pinned_free_space = generation_allocated_in_pinned_free (gen_2) + generation_pinned_free_obj_space (gen_2);
                if (total_pinned_free_space != 0)
                {
                    pinned_free_list_efficiency = (float)(generation_allocated_in_pinned_free (gen_2)) / (float)total_pinned_free_space;
                }

                dprintf (1, ("[h%d] gen2 allocated %Id bytes with %Id bytes pinned free spaces (effi: %d%%), %Id (%Id) left",
                            heap_number,
                            generation_allocated_in_pinned_free (gen_2),
                            total_pinned_free_space,
                            (int)(pinned_free_list_efficiency * 100),
                            generation_pinned_free_obj_space (gen_2),
                            total_num_pinned_free_spaces_left));
            }
#endif //FREE_USAGE_STATS

            //Go past all of the pinned plugs for this generation.
            while (!pinned_plug_que_empty_p() &&
                   (!in_range_for_segment ((pinned_plug (oldest_pin())), ephemeral_heap_segment)))
            {
                size_t  entry = deque_pinned_plug();
                mark*  m = pinned_plug_of (entry);
                uint8_t*  plug = pinned_plug (m);
                size_t  len = pinned_len (m);
                // detect pinned blocks in a different (later) segment than the
                // allocation segment; skip those until the oldest pin is in the ephemeral seg,
                // adjusting the allocation segment along the way (at the end it will
                // be the ephemeral segment).
                heap_segment* nseg = heap_segment_in_range (generation_allocation_segment (consing_gen));

                PREFIX_ASSUME(nseg != NULL);

                while (!((plug >= generation_allocation_pointer (consing_gen))&&
                        (plug < heap_segment_allocated (nseg))))
                {
                    //adjust the end of the segment to be the end of the plug
                    assert (generation_allocation_pointer (consing_gen)>=
                            heap_segment_mem (nseg));
                    assert (generation_allocation_pointer (consing_gen)<=
                            heap_segment_committed (nseg));

                    heap_segment_plan_allocated (nseg) =
                        generation_allocation_pointer (consing_gen);
                    //switch allocation segment
                    nseg = heap_segment_next_rw (nseg);
                    generation_allocation_segment (consing_gen) = nseg;
                    //reset the allocation pointer and limits
                    generation_allocation_pointer (consing_gen) =
                        heap_segment_mem (nseg);
                }
                set_new_pin_info (m, generation_allocation_pointer (consing_gen));
                assert(pinned_len(m) == 0 || pinned_len(m) >= Align(min_obj_size));
                generation_allocation_pointer (consing_gen) = plug + len;
                generation_allocation_limit (consing_gen) =
                    generation_allocation_pointer (consing_gen);
            }
            allocate_in_condemned = TRUE;
            consing_gen = ensure_ephemeral_heap_segment (consing_gen);
        }

        if (active_new_gen_number != max_generation)
        {
            if (active_new_gen_number == (max_generation - 1))
            {
                maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
                if (!demote_gen1_p)
                    advance_pins_for_demotion (consing_gen);
            }

            plan_generation_start (generation_of (active_new_gen_number), consing_gen, x);

            dprintf (2, ("process eph: allocated gen%d start at %Ix",
                active_new_gen_number,
                generation_plan_allocation_start (generation_of (active_new_gen_number))));

            if ((demotion_low == MAX_PTR) && !pinned_plug_que_empty_p())
            {
                uint8_t* pplug = pinned_plug (oldest_pin());
                if (object_gennum (pplug) > 0)
                {
                    demotion_low = pplug;
                    dprintf (3, ("process eph: dlow->%Ix", demotion_low));
                }
            }

            assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
        }

        goto retry;
    }
}

inline
void gc_heap::seg_clear_mark_bits (heap_segment* seg)
{
    uint8_t* o = heap_segment_mem (seg);
    while (o < heap_segment_allocated (seg))
    {
        if (marked (o))
        {
            clear_marked (o);
        }
        o = o  + Align (size (o));
    }
}

#ifdef FEATURE_BASICFREEZE
void gc_heap::sweep_ro_segments (heap_segment* start_seg)
{
    // Go through all of the segments in range and reset their mark bits.
    // TODO: this works only on small object segments.

    heap_segment* seg = start_seg;

    while (seg)
    {
        if (heap_segment_read_only_p (seg) &&
            heap_segment_in_range_p (seg))
        {
#ifdef BACKGROUND_GC
            if (settings.concurrent)
            {
                seg_clear_mark_array_bits_soh (seg);
            }
            else
            {
                seg_clear_mark_bits (seg);
            }
#else //BACKGROUND_GC

#ifdef MARK_ARRAY
            if(gc_can_use_concurrent)
            {
                clear_mark_array (max (heap_segment_mem (seg), lowest_address),
                              min (heap_segment_allocated (seg), highest_address),
                              FALSE); // read_only segments need their mark bits cleared
            }
#else //MARK_ARRAY
            seg_clear_mark_bits (seg);
#endif //MARK_ARRAY

#endif //BACKGROUND_GC
        }
        seg = heap_segment_next (seg);
    }
}
#endif // FEATURE_BASICFREEZE

#ifdef FEATURE_LOH_COMPACTION
inline
BOOL gc_heap::loh_pinned_plug_que_empty_p()
{
    return (loh_pinned_queue_bos == loh_pinned_queue_tos);
}

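// If the oldest LOH pin falls inside the current allocation region, cap the
// allocation limit at that pinned plug so condemned allocation stops right before it.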
void gc_heap::loh_set_allocator_next_pin()
{
    if (!(loh_pinned_plug_que_empty_p()))
    {
        mark*  oldest_entry = loh_oldest_pin();
        uint8_t* plug = pinned_plug (oldest_entry);
        generation* gen = large_object_generation;
        if ((plug >= generation_allocation_pointer (gen)) &&
            (plug <  generation_allocation_limit (gen)))
        {
            generation_allocation_limit (gen) = pinned_plug (oldest_entry);
        }
        else
            assert (!((plug < generation_allocation_pointer (gen)) &&
                      (plug >= heap_segment_mem (generation_allocation_segment (gen)))));
    }
}

size_t gc_heap::loh_deque_pinned_plug ()
{
    size_t m = loh_pinned_queue_bos;
    loh_pinned_queue_bos++;
    return m;
}

inline
mark* gc_heap::loh_pinned_plug_of (size_t bos)
{
    return &loh_pinned_queue[bos];
}

inline
mark* gc_heap::loh_oldest_pin()
{
    return loh_pinned_plug_of (loh_pinned_queue_bos);
}

// If we can't grow the queue, then don't compact.
BOOL gc_heap::loh_enque_pinned_plug (uint8_t* plug, size_t len)
{
    assert(len >= Align(min_obj_size, get_alignment_constant (FALSE)));

    if (loh_pinned_queue_length <= loh_pinned_queue_tos)
    {
        if (!grow_mark_stack (loh_pinned_queue, loh_pinned_queue_length, LOH_PIN_QUEUE_LENGTH))
        {
            return FALSE;
        }
    }
    dprintf (3, (" P: %Ix(%Id)", plug, len));
    mark& m = loh_pinned_queue[loh_pinned_queue_tos];
    m.first = plug;
    m.len = len;
    loh_pinned_queue_tos++;
    loh_set_allocator_next_pin();
    return TRUE;
}

inline
BOOL gc_heap::loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit)
{
    dprintf (1235, ("trying to fit %Id(%Id) between %Ix and %Ix (%Id)",
        size,
        (2* AlignQword (loh_padding_obj_size) +  size),
        alloc_pointer,
        alloc_limit,
        (alloc_limit - alloc_pointer)));

    return ((alloc_pointer + 2* AlignQword (loh_padding_obj_size) +  size) <= alloc_limit);
}

uint8_t* gc_heap::loh_allocate_in_condemned (uint8_t* old_loc, size_t size)
{
    UNREFERENCED_PARAMETER(old_loc);

    generation* gen = large_object_generation;
    dprintf (1235, ("E: p:%Ix, l:%Ix, s: %Id",
        generation_allocation_pointer (gen),
        generation_allocation_limit (gen),
        size));

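    // The retry loop below keeps extending the allocation region until the requested
    // size fits: consume the oldest pinned plug if it bounds the current limit, push
    // the limit out to plan_allocated and then to committed, grow the segment, and
    // finally move on to the next segment (a fatal error if none is left).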
retry:
    {
        heap_segment* seg = generation_allocation_segment (gen);
        if (!(loh_size_fit_p (size, generation_allocation_pointer (gen), generation_allocation_limit (gen))))
        {
            if ((!(loh_pinned_plug_que_empty_p()) &&
                 (generation_allocation_limit (gen) ==
                  pinned_plug (loh_oldest_pin()))))
            {
                mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
                size_t len = pinned_len (m);
                uint8_t* plug = pinned_plug (m);
                dprintf (1235, ("AIC: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
                pinned_len (m) = plug - generation_allocation_pointer (gen);
                generation_allocation_pointer (gen) = plug + len;

                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                loh_set_allocator_next_pin();
                dprintf (1235, ("s: p: %Ix, l: %Ix (%Id)",
                    generation_allocation_pointer (gen),
                    generation_allocation_limit (gen),
                    (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

                goto retry;
            }

            if (generation_allocation_limit (gen) != heap_segment_plan_allocated (seg))
            {
                generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                dprintf (1235, ("l->pa(%Ix)", generation_allocation_limit (gen)));
            }
            else
            {
                if (heap_segment_plan_allocated (seg) != heap_segment_committed (seg))
                {
                    heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                    generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);
                    dprintf (1235, ("l->c(%Ix)", generation_allocation_limit (gen)));
                }
                else
                {
                    if (loh_size_fit_p (size, generation_allocation_pointer (gen), heap_segment_reserved (seg)) &&
                        (grow_heap_segment (seg, (generation_allocation_pointer (gen) + size + 2* AlignQword (loh_padding_obj_size)))))
                    {
                        dprintf (1235, ("growing seg from %Ix to %Ix\n", heap_segment_committed (seg),
                                         (generation_allocation_pointer (gen) + size)));

                        heap_segment_plan_allocated (seg) = heap_segment_committed (seg);
                        generation_allocation_limit (gen) = heap_segment_plan_allocated (seg);

                        dprintf (1235, ("g: p: %Ix, l: %Ix (%Id)",
                            generation_allocation_pointer (gen),
                            generation_allocation_limit (gen),
                            (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
                    }
                    else
                    {
                        heap_segment* next_seg = heap_segment_next (seg);
                        assert (generation_allocation_pointer (gen)>=
                                heap_segment_mem (seg));
                        // Verify that all pinned plugs for this segment are consumed
                        if (!loh_pinned_plug_que_empty_p() &&
                            ((pinned_plug (loh_oldest_pin()) <
                              heap_segment_allocated (seg)) &&
                             (pinned_plug (loh_oldest_pin()) >=
                              generation_allocation_pointer (gen))))
                        {
                            LOG((LF_GC, LL_INFO10, "remaining pinned plug %Ix while leaving segment on allocation",
                                         pinned_plug (loh_oldest_pin())));
                            dprintf (1236, ("queue empty: %d", loh_pinned_plug_que_empty_p()));
                            FATAL_GC_ERROR();
                        }
                        assert (generation_allocation_pointer (gen)>=
                                heap_segment_mem (seg));
                        assert (generation_allocation_pointer (gen)<=
                                heap_segment_committed (seg));
                        heap_segment_plan_allocated (seg) = generation_allocation_pointer (gen);

                        if (next_seg)
                        {
                            // For LOH, do we want to try starting from the first LOH segment every time though?
                            generation_allocation_segment (gen) = next_seg;
                            generation_allocation_pointer (gen) = heap_segment_mem (next_seg);
                            generation_allocation_limit (gen) = generation_allocation_pointer (gen);

                            dprintf (1235, ("n: p: %Ix, l: %Ix (%Id)",
                                generation_allocation_pointer (gen),
                                generation_allocation_limit (gen),
                                (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));
                        }
                        else
                        {
                            dprintf (1, ("We ran out of space compacting, shouldn't happen"));
                            FATAL_GC_ERROR();
                        }
                    }
                }
            }
            loh_set_allocator_next_pin();

            dprintf (1235, ("r: p: %Ix, l: %Ix (%Id)",
                generation_allocation_pointer (gen),
                generation_allocation_limit (gen),
                (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

            goto retry;
        }
    }

    {
        assert (generation_allocation_pointer (gen)>=
                heap_segment_mem (generation_allocation_segment (gen)));
        uint8_t* result = generation_allocation_pointer (gen);
        size_t loh_pad = AlignQword (loh_padding_obj_size);

        generation_allocation_pointer (gen) += size + loh_pad;
        assert (generation_allocation_pointer (gen) <= generation_allocation_limit (gen));

        dprintf (1235, ("p: %Ix, l: %Ix (%Id)",
            generation_allocation_pointer (gen),
            generation_allocation_limit (gen),
            (generation_allocation_limit (gen) - generation_allocation_pointer (gen))));

        assert (result + loh_pad);
        return result + loh_pad;
    }
}

BOOL gc_heap::loh_compaction_requested()
{
    // If a hard limit is specified, the GC will automatically decide whether LOH needs to be compacted.
    return (loh_compaction_always_p || (loh_compaction_mode != loh_compaction_default));
}

inline
void gc_heap::check_loh_compact_mode (BOOL all_heaps_compacted_p)
{
    if (settings.loh_compaction && (loh_compaction_mode == loh_compaction_once))
    {
        if (all_heaps_compacted_p)
        {
            // If the compaction mode says to compact once and we are going to compact LOH,
            // we need to reset it back to the default (no compaction).
            loh_compaction_mode = loh_compaction_default;
        }
    }
}

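// Plans LOH compaction: walks every LOH segment, queues up pinned plugs and computes
// a relocation distance for each movable object by simulating allocation into the
// compacted layout. Returns FALSE if the pinned plug queue can't be allocated or
// grown, in which case LOH is not compacted.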
BOOL gc_heap::plan_loh()
{
    if (!loh_pinned_queue)
    {
        loh_pinned_queue = new (nothrow) (mark [LOH_PIN_QUEUE_LENGTH]);
        if (!loh_pinned_queue)
        {
            dprintf (1, ("Cannot allocate the LOH pinned queue (%Id bytes), no compaction",
                         LOH_PIN_QUEUE_LENGTH * sizeof (mark)));
            return FALSE;
        }

        loh_pinned_queue_length = LOH_PIN_QUEUE_LENGTH;
    }

    if (heap_number == 0)
        loh_pinned_queue_decay = LOH_PIN_DECAY;

    loh_pinned_queue_tos = 0;
    loh_pinned_queue_bos = 0;

    generation* gen        = large_object_generation;
    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
    PREFIX_ASSUME(start_seg != NULL);
    heap_segment* seg      = start_seg;
    uint8_t* o             = generation_allocation_start (gen);

    dprintf (1235, ("before GC LOH size: %Id, free list: %Id, free obj: %Id\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));

    while (seg)
    {
        heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
        seg = heap_segment_next (seg);
    }

    seg = start_seg;

    //Skip the generation gap object
    o = o + AlignQword (size (o));
    // We don't need to ever realloc gen3 start so don't touch it.
    heap_segment_plan_allocated (seg) = o;
    generation_allocation_pointer (gen) = o;
    generation_allocation_limit (gen) = generation_allocation_pointer (gen);
    generation_allocation_segment (gen) = start_seg;

    uint8_t* free_space_start = o;
    uint8_t* free_space_end = o;
    uint8_t* new_address = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            free_space_end = o;
            size_t size = AlignQword (size (o));
            dprintf (1235, ("%Ix(%Id) M", o, size));

            if (pinned (o))
            {
                // We don't clear the pinned bit yet so we can check in
                // compact phase how big a free object we should allocate
                // in front of the pinned object. We use the reloc address
                // field to store this.
                if (!loh_enque_pinned_plug (o, size))
                {
                    return FALSE;
                }
                new_address = o;
            }
            else
            {
                new_address = loh_allocate_in_condemned (o, size);
            }

            loh_set_node_relocation_distance (o, (new_address - o));
            dprintf (1235, ("lobj %Ix-%Ix -> %Ix-%Ix (%Id)", o, (o + size), new_address, (new_address + size), (new_address - o)));

            o = o + size;
            free_space_start = o;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                dprintf (1235, ("%Ix(%Id) F (%d)", o, AlignQword (size (o)), ((method_table (o) == g_gc_pFreeObjectMethodTable) ? 1 : 0)));
                o = o + AlignQword (size (o));
            }
        }
    }

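    // For each pin still in the queue, record the size of the free space that will be
    // left in front of it (pinned_len) and advance the allocation pointer past it,
    // switching allocation segments as needed.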
    while (!loh_pinned_plug_que_empty_p())
    {
        mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
        size_t len = pinned_len (m);
        uint8_t* plug = pinned_plug (m);

        // detect pinned block in different segment (later) than
        // allocation segment
        heap_segment* nseg = heap_segment_rw (generation_allocation_segment (gen));

        while ((plug < generation_allocation_pointer (gen)) ||
               (plug >= heap_segment_allocated (nseg)))
        {
            assert ((plug < heap_segment_mem (nseg)) ||
                    (plug > heap_segment_reserved (nseg)));
            //adjust the end of the segment to be the end of the plug
            assert (generation_allocation_pointer (gen)>=
                    heap_segment_mem (nseg));
            assert (generation_allocation_pointer (gen)<=
                    heap_segment_committed (nseg));

            heap_segment_plan_allocated (nseg) =
                generation_allocation_pointer (gen);
            //switch allocation segment
            nseg = heap_segment_next_rw (nseg);
            generation_allocation_segment (gen) = nseg;
            //reset the allocation pointer and limits
            generation_allocation_pointer (gen) =
                heap_segment_mem (nseg);
        }

        dprintf (1235, ("SP: %Ix->%Ix(%Id)", generation_allocation_pointer (gen), plug, plug - generation_allocation_pointer (gen)));
        pinned_len (m) = plug - generation_allocation_pointer (gen);
        generation_allocation_pointer (gen) = plug + len;
    }

    heap_segment_plan_allocated (generation_allocation_segment (gen)) = generation_allocation_pointer (gen);
    generation_allocation_pointer (gen) = 0;
    generation_allocation_limit (gen) = 0;

    return TRUE;
}

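// Compacts LOH using the relocation distances computed in plan_loh: moves each
// non-pinned survivor to its new address, threads the gap in front of every survivor
// onto the free list, and puts segments that end up empty on the freeable list.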
void gc_heap::compact_loh()
{
    assert (loh_compaction_requested() || heap_hard_limit);

    generation* gen        = large_object_generation;
    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));
    PREFIX_ASSUME(start_seg != NULL);
    heap_segment* seg      = start_seg;
    heap_segment* prev_seg = 0;
    uint8_t* o             = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));
    // We don't need to ever realloc gen3 start so don't touch it.
    uint8_t* free_space_start = o;
    uint8_t* free_space_end = o;
    generation_allocator (gen)->clear();
    generation_free_list_space (gen) = 0;
    generation_free_obj_space (gen) = 0;

    loh_pinned_queue_bos = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            heap_segment* next_seg = heap_segment_next (seg);

            if ((heap_segment_plan_allocated (seg) == heap_segment_mem (seg)) &&
                (seg != start_seg) && !heap_segment_read_only_p (seg))
            {
                dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
                assert (prev_seg);
                heap_segment_next (prev_seg) = next_seg;
                heap_segment_next (seg) = freeable_large_heap_segment;
                freeable_large_heap_segment = seg;
            }
            else
            {
                if (!heap_segment_read_only_p (seg))
                {
                    // We grew the segment to accommodate allocations.
                    if (heap_segment_plan_allocated (seg) > heap_segment_allocated (seg))
                    {
                        if ((heap_segment_plan_allocated (seg) - plug_skew)  > heap_segment_used (seg))
                        {
                            heap_segment_used (seg) = heap_segment_plan_allocated (seg) - plug_skew;
                        }
                    }

                    heap_segment_allocated (seg) = heap_segment_plan_allocated (seg);
                    dprintf (3, ("Trimming seg to %Ix[", heap_segment_allocated (seg)));
                    decommit_heap_segment_pages (seg, 0);
                    dprintf (1236, ("CLOH: seg: %Ix, alloc: %Ix, used: %Ix, committed: %Ix",
                        seg,
                        heap_segment_allocated (seg),
                        heap_segment_used (seg),
                        heap_segment_committed (seg)));
                    //heap_segment_used (seg) = heap_segment_allocated (seg) - plug_skew;
                    dprintf (1236, ("CLOH: used is set to %Ix", heap_segment_used (seg)));
                }
                prev_seg = seg;
            }

            seg = next_seg;
            if (seg == 0)
                break;
            else
            {
                o = heap_segment_mem (seg);
            }
        }

        if (marked (o))
        {
            free_space_end = o;
            size_t size = AlignQword (size (o));

            size_t loh_pad;
            uint8_t* reloc = o;
            clear_marked (o);

            if (pinned (o))
            {
                // We are relying on the fact that pinned objects are always visited in the same order
                // in the plan phase and in the compact phase.
                mark* m = loh_pinned_plug_of (loh_deque_pinned_plug());
                uint8_t* plug = pinned_plug (m);
                assert (plug == o);

                loh_pad = pinned_len (m);
                clear_pinned (o);
            }
            else
            {
                loh_pad = AlignQword (loh_padding_obj_size);

                reloc += loh_node_relocation_distance (o);
                gcmemcopy (reloc, o, size, TRUE);
            }

            thread_gap ((reloc - loh_pad), loh_pad, gen);

            o = o + size;
            free_space_start = o;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }

    assert (loh_pinned_plug_que_empty_p());

    dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));
}

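// Updates the references stored inside surviving LOH objects so they point at the
// new (post-compaction) addresses of the objects they refer to.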
void gc_heap::relocate_in_loh_compact()
{
    generation* gen        = large_object_generation;
    heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
    uint8_t* o             = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));

    relocate_args args;
    args.low = gc_low;
    args.high = gc_high;
    args.last_plug = 0;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            size_t size = AlignQword (size (o));

            check_class_object_demotion (o);
            if (contain_pointers (o))
            {
                go_through_object_nostart (method_table (o), o, size(o), pval,
                {
                    reloc_survivor_helper (pval);
                });
            }

            o = o + size;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }

    dprintf (1235, ("after GC LOH size: %Id, free list: %Id, free obj: %Id\n\n",
        generation_size (max_generation + 1),
        generation_free_list_space (gen),
        generation_free_obj_space (gen)));
}

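// Walks the surviving LOH objects and reports each one, along with its relocation
// distance, to the supplied record_surv_fn callback.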
void gc_heap::walk_relocation_for_loh (void* profiling_context, record_surv_fn fn)
{
    generation* gen        = large_object_generation;
    heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));
    uint8_t* o             = generation_allocation_start (gen);

    //Skip the generation gap object
    o = o + AlignQword (size (o));

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
            {
                break;
            }

            o = heap_segment_mem (seg);
        }

        if (marked (o))
        {
            size_t size = AlignQword (size (o));

            ptrdiff_t reloc = loh_node_relocation_distance (o);

            STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc);

            fn (o, (o + size), reloc, profiling_context, !!settings.compaction, false);

            o = o + size;
            if (o < heap_segment_allocated (seg))
            {
                assert (!marked (o));
            }
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !marked (o))
            {
                o = o + AlignQword (size (o));
            }
        }
    }
}

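// Returns TRUE if o is on the large object heap. Brick entries are only maintained
// for small object segments, so a zero brick entry is treated as a LOH address.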
BOOL gc_heap::loh_object_p (uint8_t* o)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps [0];
    int brick_entry = hp->brick_table[hp->brick_of (o)];
#else //MULTIPLE_HEAPS
    int brick_entry = brick_table[brick_of (o)];
#endif //MULTIPLE_HEAPS

    return (brick_entry == 0);
}
#endif //FEATURE_LOH_COMPACTION

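// Marks the current plug as artificially pinned: flips the plug-tracking flags to
// the pinned state and records the whole plug size as artificially pinned.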
void gc_heap::convert_to_pinned_plug (BOOL& last_npinned_plug_p,
                                      BOOL& last_pinned_plug_p,
                                      BOOL& pinned_plug_p,
                                      size_t ps,
                                      size_t& artificial_pinned_size)
{
    last_npinned_plug_p = FALSE;
    last_pinned_plug_p = TRUE;
    pinned_plug_p = TRUE;
    artificial_pinned_size = ps;
}

// Because we have artificial pinning, we can't guarantee that pinned and npinned
// plugs are always interleaved.
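// store_plug_gap_info records the size of the gap in front of the current plug and
// tracks the pinned/npinned state transitions; for pinned plugs it arranges for the
// pre- and post-plug words (gap_reloc_pair) that get overwritten to be saved so they
// can be restored later.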
void gc_heap::store_plug_gap_info (uint8_t* plug_start,
                                   uint8_t* plug_end,
                                   BOOL& last_npinned_plug_p,
                                   BOOL& last_pinned_plug_p,
                                   uint8_t*& last_pinned_plug,
                                   BOOL& pinned_plug_p,
                                   uint8_t* last_object_in_last_plug,
                                   BOOL& merge_with_last_pin_p,
                                   // this is only for verification purposes
                                   size_t last_plug_len)
{
    UNREFERENCED_PARAMETER(last_plug_len);

    if (!last_npinned_plug_p && !last_pinned_plug_p)
    {
        //dprintf (3, ("last full plug end: %Ix, full plug start: %Ix", plug_end, plug_start));
        dprintf (3, ("Free: %Ix", (plug_start - plug_end)));
        assert ((plug_start == plug_end) || ((size_t)(plug_start - plug_end) >= Align (min_obj_size)));
        set_gap_size (plug_start, plug_start - plug_end);
    }

    if (pinned (plug_start))
    {
        BOOL save_pre_plug_info_p = FALSE;

        if (last_npinned_plug_p || last_pinned_plug_p)
        {
            //if (last_plug_len == Align (min_obj_size))
            //{
            //    dprintf (3, ("debugging only - last npinned plug is min, check to see if it's correct"));
            //    GCToOSInterface::DebugBreak();
            //}
            save_pre_plug_info_p = TRUE;
        }

        pinned_plug_p = TRUE;
        last_npinned_plug_p = FALSE;

        if (last_pinned_plug_p)
        {
            dprintf (3, ("last plug %Ix was also pinned, should merge", last_pinned_plug));
            merge_with_last_pin_p = TRUE;
        }
        else
        {
            last_pinned_plug_p = TRUE;
            last_pinned_plug = plug_start;

            enque_pinned_plug (last_pinned_plug, save_pre_plug_info_p, last_object_in_last_plug);

            if (save_pre_plug_info_p)
            {
                set_gap_size (plug_start, sizeof (gap_reloc_pair));
            }
        }
    }
    else
    {
        if (last_pinned_plug_p)
        {
            //if (Align (last_plug_len) < min_pre_pin_obj_size)
            //{
            //    dprintf (3, ("debugging only - last pinned plug is min, check to see if it's correct"));
            //    GCToOSInterface::DebugBreak();
            //}

            save_post_plug_info (last_pinned_plug, last_object_in_last_plug, plug_start);
            set_gap_size (plug_start, sizeof (gap_reloc_pair));

            verify_pins_with_post_plug_info("after saving post plug info");
        }
        last_npinned_plug_p = TRUE;
        last_pinned_plug_p = FALSE;
    }
}

void gc_heap::record_interesting_data_point (interesting_data_point idp)
{
#ifdef GC_CONFIG_DRIVEN
    (interesting_data_per_gc[idp])++;
#else
    UNREFERENCED_PARAMETER(idp);
#endif //GC_CONFIG_DRIVEN
}

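// plan_phase walks the condemned generation(s), coalescing adjacent marked objects
// into plugs, deciding a new address for each plug (in an older generation or in the
// condemned space itself), queuing pinned plugs, and building the per-brick plug
// trees that the relocate and compact phases consume.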
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
#endif //_PREFAST_
void gc_heap::plan_phase (int condemned_gen_number)
{
    size_t old_gen2_allocated = 0;
    size_t old_gen2_size = 0;

    if (condemned_gen_number == (max_generation - 1))
    {
        old_gen2_allocated = generation_free_list_allocated (generation_of (max_generation));
        old_gen2_size = generation_size (max_generation);
    }

    assert (settings.concurrent == FALSE);

    // %type%  category = quote (plan);
#ifdef TIME_GC
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#endif //TIME_GC

    dprintf (2,("---- Plan Phase ---- Condemned generation %d, promotion: %d",
                condemned_gen_number, settings.promotion ? 1 : 0));

    generation*  condemned_gen1 = generation_of (condemned_gen_number);

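    // Decide whether the mark list (a sorted array of marked object addresses built
    // during the mark phase) can be used to skip over unmarked regions quickly; it is
    // only usable for ephemeral GCs when it did not overflow.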
#ifdef MARK_LIST
    BOOL use_mark_list = FALSE;
    uint8_t** mark_list_next = &mark_list[0];
#ifdef GC_CONFIG_DRIVEN
    dprintf (3, ("total number of marked objects: %Id (%Id)",
                 (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0]))));

    if (mark_list_index >= (mark_list_end + 1))
        mark_list_index = mark_list_end + 1;
#else
    dprintf (3, ("mark_list length: %Id",
                 (mark_list_index - &mark_list[0])));
#endif //GC_CONFIG_DRIVEN

    if ((condemned_gen_number < max_generation) &&
        (mark_list_index <= mark_list_end)
#ifdef BACKGROUND_GC
        && (!recursive_gc_sync::background_running_p())
#endif //BACKGROUND_GC
        )
    {
#ifndef MULTIPLE_HEAPS
        _sort (&mark_list[0], mark_list_index-1, 0);
        //printf ("using mark list at GC #%d", dd_collection_count (dynamic_data_of (0)));
        //verify_qsort_array (&mark_list[0], mark_list_index-1);
#endif //!MULTIPLE_HEAPS
        use_mark_list = TRUE;
        get_gc_data_per_heap()->set_mechanism_bit (gc_mark_list_bit);
    }
    else
    {
        dprintf (3, ("mark_list not used"));
    }

#endif //MARK_LIST

#ifdef FEATURE_BASICFREEZE
    if ((generation_start_segment (condemned_gen1) != ephemeral_heap_segment) &&
        ro_segments_in_range)
    {
        sweep_ro_segments (generation_start_segment (condemned_gen1));
    }
#endif // FEATURE_BASICFREEZE

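    // For workstation GC, slow and shigh record the lowest and highest surviving
    // (marked) objects seen during the mark phase. Use them to trim each segment's
    // allocated range and to turn the dead space below slow into a single free
    // object, so the planning loop below has less to walk.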
#ifndef MULTIPLE_HEAPS
    if (shigh != (uint8_t*)0)
    {
        heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg != NULL);

        heap_segment* fseg = seg;
        do
        {
            if (slow > heap_segment_mem (seg) &&
                slow < heap_segment_reserved (seg))
            {
                if (seg == fseg)
                {
                    uint8_t* o = generation_allocation_start (condemned_gen1) +
                        Align (size (generation_allocation_start (condemned_gen1)));
                    if (slow > o)
                    {
                        assert ((slow - o) >= (int)Align (min_obj_size));
#ifdef BACKGROUND_GC
                        if (current_c_gc_state == c_gc_state_marking)
                        {
                            bgc_clear_batch_mark_array_bits (o, slow);
                        }
#endif //BACKGROUND_GC
                        make_unused_array (o, slow - o);
                    }
                }
                else
                {
                    assert (condemned_gen_number == max_generation);
                    make_unused_array (heap_segment_mem (seg),
                                       slow - heap_segment_mem (seg));
                }
            }
            if (in_range_for_segment (shigh, seg))
            {
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits ((shigh + Align (size (shigh))), heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) = shigh + Align (size (shigh));
            }
            // test if the segment is in the range of [slow, shigh]
            if (!((heap_segment_reserved (seg) >= slow) &&
                  (heap_segment_mem (seg) <= shigh)))
            {
                // shorten it to minimum
                heap_segment_allocated (seg) =  heap_segment_mem (seg);
            }
            seg = heap_segment_next_rw (seg);
        } while (seg);
    }
    else
    {
        heap_segment* seg = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg != NULL);

        heap_segment* sseg = seg;
        do
        {
            // shorten it to minimum
            if (seg == sseg)
            {
                // no survivors - make all generations look empty
                uint8_t* o = generation_allocation_start (condemned_gen1) +
                    Align (size (generation_allocation_start (condemned_gen1)));
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits (o, heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) = o;
            }
            else
            {
                assert (condemned_gen_number == max_generation);
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    bgc_clear_batch_mark_array_bits (heap_segment_mem (seg), heap_segment_allocated (seg));
                }
#endif //BACKGROUND_GC
                heap_segment_allocated (seg) =  heap_segment_mem (seg);
            }
            seg = heap_segment_next_rw (seg);
        } while (seg);
    }

#endif //MULTIPLE_HEAPS

    heap_segment*  seg1 = heap_segment_rw (generation_start_segment (condemned_gen1));

    PREFIX_ASSUME(seg1 != NULL);

    uint8_t*  end = heap_segment_allocated (seg1);
    uint8_t*  first_condemned_address = generation_allocation_start (condemned_gen1);
    uint8_t*  x = first_condemned_address;

    assert (!marked (x));
    uint8_t*  plug_end = x;
    uint8_t*  tree = 0;
    size_t  sequence_number = 0;
    uint8_t*  last_node = 0;
    size_t  current_brick = brick_of (x);
    BOOL  allocate_in_condemned = ((condemned_gen_number == max_generation)||
                                   (settings.promotion == FALSE));
    int  active_old_gen_number = condemned_gen_number;
    int  active_new_gen_number = (allocate_in_condemned ? condemned_gen_number:
                                  (1 + condemned_gen_number));
    generation*  older_gen = 0;
    generation* consing_gen = condemned_gen1;
    alloc_list  r_free_list [MAX_BUCKET_COUNT];

    size_t r_free_list_space = 0;
    size_t r_free_obj_space = 0;
    size_t r_older_gen_free_list_allocated = 0;
    size_t r_older_gen_condemned_allocated = 0;
    size_t r_older_gen_end_seg_allocated = 0;
    uint8_t*  r_allocation_pointer = 0;
    uint8_t*  r_allocation_limit = 0;
    uint8_t* r_allocation_start_region = 0;
    heap_segment*  r_allocation_segment = 0;
#ifdef FREE_USAGE_STATS
    size_t r_older_gen_free_space[NUM_GEN_POWER2];
#endif //FREE_USAGE_STATS

    if ((condemned_gen_number < max_generation))
    {
        older_gen = generation_of (min (max_generation, 1 + condemned_gen_number));
        generation_allocator (older_gen)->copy_to_alloc_list (r_free_list);

        r_free_list_space = generation_free_list_space (older_gen);
        r_free_obj_space = generation_free_obj_space (older_gen);
#ifdef FREE_USAGE_STATS
        memcpy (r_older_gen_free_space, older_gen->gen_free_spaces, sizeof (r_older_gen_free_space));
#endif //FREE_USAGE_STATS
        generation_allocate_end_seg_p (older_gen) = FALSE;
        r_older_gen_free_list_allocated = generation_free_list_allocated (older_gen);
        r_older_gen_condemned_allocated = generation_condemned_allocated (older_gen);
        r_older_gen_end_seg_allocated = generation_end_seg_allocated (older_gen);
        r_allocation_limit = generation_allocation_limit (older_gen);
        r_allocation_pointer = generation_allocation_pointer (older_gen);
        r_allocation_start_region = generation_allocation_context_start_region (older_gen);
        r_allocation_segment = generation_allocation_segment (older_gen);
        heap_segment* start_seg = heap_segment_rw (generation_start_segment (older_gen));

        PREFIX_ASSUME(start_seg != NULL);

        if (start_seg != ephemeral_heap_segment)
        {
            assert (condemned_gen_number == (max_generation - 1));
            while (start_seg && (start_seg != ephemeral_heap_segment))
            {
                assert (heap_segment_allocated (start_seg) >=
                        heap_segment_mem (start_seg));
                assert (heap_segment_allocated (start_seg) <=
                        heap_segment_reserved (start_seg));
                heap_segment_plan_allocated (start_seg) =
                    heap_segment_allocated (start_seg);
                start_seg = heap_segment_next_rw (start_seg);
            }
        }
    }

    //reset all of the segments' allocated sizes
    {
        heap_segment*  seg2 = heap_segment_rw (generation_start_segment (condemned_gen1));

        PREFIX_ASSUME(seg2 != NULL);

        while (seg2)
        {
            heap_segment_plan_allocated (seg2) =
                heap_segment_mem (seg2);
            seg2 = heap_segment_next_rw (seg2);
        }
    }
    int  condemned_gn = condemned_gen_number;

    int bottom_gen = 0;
    init_free_and_plug();

    while (condemned_gn >= bottom_gen)
    {
        generation*  condemned_gen2 = generation_of (condemned_gn);
        generation_allocator (condemned_gen2)->clear();
        generation_free_list_space (condemned_gen2) = 0;
        generation_free_obj_space (condemned_gen2) = 0;
        generation_allocation_size (condemned_gen2) = 0;
        generation_condemned_allocated (condemned_gen2) = 0;
        generation_sweep_allocated (condemned_gen2) = 0;
        generation_pinned_allocated (condemned_gen2) = 0;
        generation_free_list_allocated(condemned_gen2) = 0;
        generation_end_seg_allocated (condemned_gen2) = 0;
        generation_pinned_allocation_sweep_size (condemned_gen2) = 0;
        generation_pinned_allocation_compact_size (condemned_gen2) = 0;
#ifdef FREE_USAGE_STATS
        generation_pinned_free_obj_space (condemned_gen2) = 0;
        generation_allocated_in_pinned_free (condemned_gen2) = 0;
        generation_allocated_since_last_pin (condemned_gen2) = 0;
#endif //FREE_USAGE_STATS
        generation_plan_allocation_start (condemned_gen2) = 0;
        generation_allocation_segment (condemned_gen2) =
            heap_segment_rw (generation_start_segment (condemned_gen2));

        PREFIX_ASSUME(generation_allocation_segment(condemned_gen2) != NULL);

        if (generation_start_segment (condemned_gen2) != ephemeral_heap_segment)
        {
            generation_allocation_pointer (condemned_gen2) =
                heap_segment_mem (generation_allocation_segment (condemned_gen2));
        }
        else
        {
            generation_allocation_pointer (condemned_gen2) = generation_allocation_start (condemned_gen2);
        }

        generation_allocation_limit (condemned_gen2) = generation_allocation_pointer (condemned_gen2);
        generation_allocation_context_start_region (condemned_gen2) = generation_allocation_pointer (condemned_gen2);

        condemned_gn--;
    }

    BOOL allocate_first_generation_start = FALSE;

    if (allocate_in_condemned)
    {
        allocate_first_generation_start = TRUE;
    }

    dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));

    demotion_low = MAX_PTR;
    demotion_high = heap_segment_allocated (ephemeral_heap_segment);

    // If we are doing a gen1 only because of cards, it means we should not demote any pinned plugs
    // from gen1. They should get promoted to gen2.
    demote_gen1_p = !(settings.promotion &&
                      (settings.condemned_generation == (max_generation - 1)) &&
                      gen_to_condemn_reasons.is_only_condition (gen_low_card_p));

    total_ephemeral_size = 0;

    print_free_and_plug ("BP");

    for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
    {
        generation* temp_gen = generation_of (gen_idx);

        dprintf (2, ("gen%d start %Ix, plan start %Ix",
            gen_idx,
            generation_allocation_start (temp_gen),
            generation_plan_allocation_start (temp_gen)));
    }

    BOOL fire_pinned_plug_events_p = EVENT_ENABLED(PinPlugAtGCTime);
    size_t last_plug_len = 0;

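    // Main planning loop: walk each segment of the condemned space, coalescing runs
    // of marked objects into plugs and skipping over unmarked regions (using the
    // mark list when available).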
    while (1)
    {
        if (x >= end)
        {
            assert (x == end);
            assert (heap_segment_allocated (seg1) == end);
            heap_segment_allocated (seg1) = plug_end;

            current_brick = update_brick_table (tree, current_brick, x, plug_end);
            dprintf (3, ("end of seg: new tree, sequence# 0"));
            sequence_number = 0;
            tree = 0;

            if (heap_segment_next_rw (seg1))
            {
                seg1 = heap_segment_next_rw (seg1);
                end = heap_segment_allocated (seg1);
                plug_end = x = heap_segment_mem (seg1);
                current_brick = brick_of (x);
                dprintf(3,( " From %Ix to %Ix", (size_t)x, (size_t)end));
                continue;
            }
            else
            {
                break;
            }
        }

        BOOL last_npinned_plug_p = FALSE;
        BOOL last_pinned_plug_p = FALSE;

        // last_pinned_plug is the beginning of the last pinned plug. If we merge a plug into a pinned
        // plug we do not change the value of last_pinned_plug. This happens with artificially pinned plugs -
        // it can be merged with a previous pinned plug and a pinned plug after it can be merged with it.
        uint8_t* last_pinned_plug = 0;
        size_t num_pinned_plugs_in_plug = 0;

        uint8_t* last_object_in_plug = 0;

        while ((x < end) && marked (x))
        {
            uint8_t*  plug_start = x;
            uint8_t*  saved_plug_end = plug_end;
            BOOL   pinned_plug_p = FALSE;
            BOOL   npin_before_pin_p = FALSE;
            BOOL   saved_last_npinned_plug_p = last_npinned_plug_p;
            uint8_t*  saved_last_object_in_plug = last_object_in_plug;
            BOOL   merge_with_last_pin_p = FALSE;

            size_t added_pinning_size = 0;
            size_t artificial_pinned_size = 0;

            store_plug_gap_info (plug_start, plug_end, last_npinned_plug_p, last_pinned_plug_p,
                                 last_pinned_plug, pinned_plug_p, last_object_in_plug,
                                 merge_with_last_pin_p, last_plug_len);

#ifdef FEATURE_STRUCTALIGN
            int requiredAlignment = ((CObjectHeader*)plug_start)->GetRequiredAlignment();
            size_t alignmentOffset = OBJECT_ALIGNMENT_OFFSET;
#endif // FEATURE_STRUCTALIGN

            {
                uint8_t* xl = x;
                while ((xl < end) && marked (xl) && (pinned (xl) == pinned_plug_p))
                {
                    assert (xl < end);
                    if (pinned(xl))
                    {
                        clear_pinned (xl);
                    }
#ifdef FEATURE_STRUCTALIGN
                    else
                    {
                        int obj_requiredAlignment = ((CObjectHeader*)xl)->GetRequiredAlignment();
                        if (obj_requiredAlignment > requiredAlignment)
                        {
                            requiredAlignment = obj_requiredAlignment;
                            alignmentOffset = xl - plug_start + OBJECT_ALIGNMENT_OFFSET;
                        }
                    }
#endif // FEATURE_STRUCTALIGN

                    clear_marked (xl);

                    dprintf(4, ("+%Ix+", (size_t)xl));
                    assert ((size (xl) > 0));
                    assert ((size (xl) <= loh_size_threshold));

                    last_object_in_plug = xl;

                    xl = xl + Align (size (xl));
                    Prefetch (xl);
                }

                BOOL next_object_marked_p = ((xl < end) && marked (xl));

                if (pinned_plug_p)
                {
                    // If it is pinned, we need to extend to the next marked object, as we can't use part of
                    // a pinned object to make the artificial gap (unless the last 3 ptr-sized words are all
                    // references, but for now I am just using the next non-pinned object for that).
                    if (next_object_marked_p)
                    {
                        clear_marked (xl);
                        last_object_in_plug = xl;
                        size_t extra_size = Align (size (xl));
                        xl = xl + extra_size;
                        added_pinning_size = extra_size;
                    }
                }
                else
                {
                    if (next_object_marked_p)
                        npin_before_pin_p = TRUE;
                }

                assert (xl <= end);
                x = xl;
            }
            dprintf (3, ( "%Ix[", (size_t)x));
            plug_end = x;
            size_t ps = plug_end - plug_start;
            last_plug_len = ps;
            dprintf (3, ( "%Ix[(%Ix)", (size_t)x, ps));
            uint8_t*  new_address = 0;

            if (!pinned_plug_p)
            {
                if (allocate_in_condemned &&
                    (settings.condemned_generation == max_generation) &&
                    (ps > OS_PAGE_SIZE))
                {
                    ptrdiff_t reloc = plug_start - generation_allocation_pointer (consing_gen);
                    // reloc should be >= 0 except when we relocate
                    // across segments and the dest seg is higher than the src

                    if ((ps > (8*OS_PAGE_SIZE)) &&
                        (reloc > 0) &&
                        ((size_t)reloc < (ps/16)))
                    {
                        dprintf (3, ("Pinning %Ix; reloc would have been: %Ix",
                                     (size_t)plug_start, reloc));
                        // The last plug couldn't have been an npinned plug or it would have
                        // included this plug.
                        assert (!saved_last_npinned_plug_p);

                        if (last_pinned_plug)
                        {
                            dprintf (3, ("artificially pinned plug merged with last pinned plug"));
                            merge_with_last_pin_p = TRUE;
                        }
                        else
                        {
                            enque_pinned_plug (plug_start, FALSE, 0);
                            last_pinned_plug = plug_start;
                        }

                        convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
                                                ps, artificial_pinned_size);
                    }
                }
            }

            if (allocate_first_generation_start)
            {
                allocate_first_generation_start = FALSE;
                plan_generation_start (condemned_gen1, consing_gen, plug_start);
                assert (generation_plan_allocation_start (condemned_gen1));
            }

            if (seg1 == ephemeral_heap_segment)
            {
                process_ephemeral_boundaries (plug_start, active_new_gen_number,
                                              active_old_gen_number,
                                              consing_gen,
                                              allocate_in_condemned);
            }

            dprintf (3, ("adding %Id to gen%d surv", ps, active_old_gen_number));

            dynamic_data* dd_active_old = dynamic_data_of (active_old_gen_number);
            dd_survived_size (dd_active_old) += ps;

            BOOL convert_to_pinned_p = FALSE;

            if (!pinned_plug_p)
            {
#if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN)
                dd_num_npinned_plugs (dd_active_old)++;
#endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN

                add_gen_plug (active_old_gen_number, ps);

                if (allocate_in_condemned)
                {
                    verify_pins_with_post_plug_info("before aic");

                    new_address =
                        allocate_in_condemned_generations (consing_gen,
                                                           ps,
                                                           active_old_gen_number,
#ifdef SHORT_PLUGS
                                                           &convert_to_pinned_p,
                                                           (npin_before_pin_p ? plug_end : 0),
                                                           seg1,
#endif //SHORT_PLUGS
                                                           plug_start REQD_ALIGN_AND_OFFSET_ARG);
                    verify_pins_with_post_plug_info("after aic");
                }
                else
                {
                    new_address = allocate_in_older_generation (older_gen, ps, active_old_gen_number, plug_start REQD_ALIGN_AND_OFFSET_ARG);

                    if (new_address != 0)
                    {
                        if (settings.condemned_generation == (max_generation - 1))
                        {
                            dprintf (3, (" NA: %Ix-%Ix -> %Ix, %Ix (%Ix)",
                                plug_start, plug_end,
                                (size_t)new_address, (size_t)new_address + (plug_end - plug_start),
                                (size_t)(plug_end - plug_start)));
                        }
                    }
                    else
                    {
                        if (generation_allocator(older_gen)->discard_if_no_fit_p())
                        {
                            allocate_in_condemned = TRUE;
                        }

                        new_address = allocate_in_condemned_generations (consing_gen, ps, active_old_gen_number,
#ifdef SHORT_PLUGS
                                                                         &convert_to_pinned_p,
                                                                         (npin_before_pin_p ? plug_end : 0),
                                                                         seg1,
#endif //SHORT_PLUGS
                                                                         plug_start REQD_ALIGN_AND_OFFSET_ARG);
                    }
                }

                if (convert_to_pinned_p)
                {
                    assert (last_npinned_plug_p != FALSE);
                    assert (last_pinned_plug_p == FALSE);
                    convert_to_pinned_plug (last_npinned_plug_p, last_pinned_plug_p, pinned_plug_p,
                                            ps, artificial_pinned_size);
                    enque_pinned_plug (plug_start, FALSE, 0);
                    last_pinned_plug = plug_start;
                }
                else
                {
                    if (!new_address)
                    {
                        //verify that we are at the end of the ephemeral segment
                        assert (generation_allocation_segment (consing_gen) ==
                                ephemeral_heap_segment);
                        //verify that we are near the end
                        assert ((generation_allocation_pointer (consing_gen) + Align (ps)) <
                                heap_segment_allocated (ephemeral_heap_segment));
                        assert ((generation_allocation_pointer (consing_gen) + Align (ps)) >
                                (heap_segment_allocated (ephemeral_heap_segment) + Align (min_obj_size)));
                    }
                    else
                    {
#ifdef SIMPLE_DPRINTF
                        dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d)",
                            (size_t)(node_gap_size (plug_start)),
                            plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address),
                                (size_t)new_address + ps, ps,
                                (is_plug_padded (plug_start) ? 1 : 0)));
#endif //SIMPLE_DPRINTF

#ifdef SHORT_PLUGS
                        if (is_plug_padded (plug_start))
                        {
                            dprintf (3, ("%Ix was padded", plug_start));
                            dd_padding_size (dd_active_old) += Align (min_obj_size);
                        }
#endif //SHORT_PLUGS
                    }
                }
            }

            if (pinned_plug_p)
            {
                if (fire_pinned_plug_events_p)
                {
                    FIRE_EVENT(PinPlugAtGCTime, plug_start, plug_end,
                               (merge_with_last_pin_p ? 0 : (uint8_t*)node_gap_size (plug_start)));
                }

                if (merge_with_last_pin_p)
                {
                    merge_with_last_pinned_plug (last_pinned_plug, ps);
                }
                else
                {
                    assert (last_pinned_plug == plug_start);
                    set_pinned_info (plug_start, ps, consing_gen);
                }

                new_address = plug_start;

                dprintf (3, ( "(%Ix)PP: [%Ix, %Ix[%Ix](m:%d)",
                            (size_t)(node_gap_size (plug_start)), (size_t)plug_start,
                            (size_t)plug_end, ps,
                            (merge_with_last_pin_p ? 1 : 0)));

                dprintf (3, ("adding %Id to gen%d pinned surv", plug_end - plug_start, active_old_gen_number));
                dd_pinned_survived_size (dd_active_old) += plug_end - plug_start;
                dd_added_pinned_size (dd_active_old) += added_pinning_size;
                dd_artificial_pinned_survived_size (dd_active_old) += artificial_pinned_size;

                if (!demote_gen1_p && (active_old_gen_number == (max_generation - 1)))
                {
                    last_gen1_pin_end = plug_end;
                }
            }

#ifdef _DEBUG
            // detect forward allocation in the same segment
            assert (!((new_address > plug_start) &&
                (new_address < heap_segment_reserved (seg1))));
#endif //_DEBUG

            if (!merge_with_last_pin_p)
            {
                if (current_brick != brick_of (plug_start))
                {
                    current_brick = update_brick_table (tree, current_brick, plug_start, saved_plug_end);
                    sequence_number = 0;
                    tree = 0;
                }

                set_node_relocation_distance (plug_start, (new_address - plug_start));
                if (last_node && (node_relocation_distance (last_node) ==
                                  (node_relocation_distance (plug_start) +
                                   (ptrdiff_t)node_gap_size (plug_start))))
                {
                    //dprintf(3,( " Lb"));
                    dprintf (3, ("%Ix Lb", plug_start));
                    set_node_left (plug_start);
                }
                if (0 == sequence_number)
                {
                    dprintf (2, ("sn: 0, tree is set to %Ix", plug_start));
                    tree = plug_start;
                }

                verify_pins_with_post_plug_info("before insert node");

                tree = insert_node (plug_start, ++sequence_number, tree, last_node);
                dprintf (3, ("tree is %Ix (b: %Ix) after insert_node", tree, brick_of (tree)));
                last_node = plug_start;

#ifdef _DEBUG
                // If the plug right before us is a pinned plug, save its gap info for debugging.
                if (!pinned_plug_p)
                {
                    if (mark_stack_tos > 0)
                    {
                        mark& m = mark_stack_array[mark_stack_tos - 1];
                        if (m.has_post_plug_info())
                        {
                            uint8_t* post_plug_info_start = m.saved_post_plug_info_start;
                            size_t* current_plug_gap_start = (size_t*)(plug_start - sizeof (plug_and_gap));
                            if ((uint8_t*)current_plug_gap_start == post_plug_info_start)
                            {
                                dprintf (3, ("Ginfo: %Ix, %Ix, %Ix",
                                    *current_plug_gap_start, *(current_plug_gap_start + 1),
                                    *(current_plug_gap_start + 2)));
                                memcpy (&(m.saved_post_plug_debug), current_plug_gap_start, sizeof (gap_reloc_pair));
                            }
                        }
                    }
                }
#endif //_DEBUG

                verify_pins_with_post_plug_info("after insert node");
            }
        }

        if (num_pinned_plugs_in_plug > 1)
        {
            dprintf (3, ("more than %Id pinned plugs in this plug", num_pinned_plugs_in_plug));
        }

        {
#ifdef MARK_LIST
            if (use_mark_list)
            {
               while ((mark_list_next < mark_list_index) &&
                      (*mark_list_next <= x))
               {
                   mark_list_next++;
               }
               if ((mark_list_next < mark_list_index)
#ifdef MULTIPLE_HEAPS
                   && (*mark_list_next < end) //for multiple segments
#endif //MULTIPLE_HEAPS
                   )
                   x = *mark_list_next;
               else
                   x = end;
            }
            else
#endif //MARK_LIST
            {
                uint8_t* xl = x;
#ifdef BACKGROUND_GC
                if (current_c_gc_state == c_gc_state_marking)
                {
                    assert (recursive_gc_sync::background_running_p());
                    while ((xl < end) && !marked (xl))
                    {
                        dprintf (4, ("-%Ix-", (size_t)xl));
                        assert ((size (xl) > 0));
                        background_object_marked (xl, TRUE);
                        xl = xl + Align (size (xl));
                        Prefetch (xl);
                    }
                }
                else
#endif //BACKGROUND_GC
                {
                    while ((xl < end) && !marked (xl))
                    {
                        dprintf (4, ("-%Ix-", (size_t)xl));
                        assert ((size (xl) > 0));
                        xl = xl + Align (size (xl));
                        Prefetch (xl);
                    }
                }
                assert (xl <= end);
                x = xl;
            }
        }
    }

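    // At this point we have walked all the plugs in the region being planned. Any pinned
    // plugs still left in the queue are planned in place below: the consing generation's
    // allocation pointer is advanced past each of them, and the remaining generation start
    // gaps are planned along the way once the oldest pin falls in the ephemeral segment.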
    while (!pinned_plug_que_empty_p())
    {
        if (settings.promotion)
        {
            uint8_t* pplug = pinned_plug (oldest_pin());
            if (in_range_for_segment (pplug, ephemeral_heap_segment))
            {
                consing_gen = ensure_ephemeral_heap_segment (consing_gen);
                //allocate all of the generation gaps
                while (active_new_gen_number > 0)
                {
                    active_new_gen_number--;

                    if (active_new_gen_number == (max_generation - 1))
                    {
                        maxgen_pinned_compact_before_advance = generation_pinned_allocation_compact_size (generation_of (max_generation));
                        if (!demote_gen1_p)
                            advance_pins_for_demotion (consing_gen);
                    }

                    generation* gen = generation_of (active_new_gen_number);
                    plan_generation_start (gen, consing_gen, 0);

                    if (demotion_low == MAX_PTR)
                    {
                        demotion_low = pplug;
                        dprintf (3, ("end plan: dlow->%Ix", demotion_low));
                    }

                    dprintf (2, ("(%d)gen%d plan start: %Ix",
                                  heap_number, active_new_gen_number, (size_t)generation_plan_allocation_start (gen)));
                    assert (generation_plan_allocation_start (gen));
                }
            }
        }

        if (pinned_plug_que_empty_p())
            break;

        size_t  entry = deque_pinned_plug();
        mark*  m = pinned_plug_of (entry);
        uint8_t*  plug = pinned_plug (m);
        size_t  len = pinned_len (m);

        // detect pinned block in different segment (later) than
        // allocation segment
        heap_segment* nseg = heap_segment_rw (generation_allocation_segment (consing_gen));

        while ((plug < generation_allocation_pointer (consing_gen)) ||
               (plug >= heap_segment_allocated (nseg)))
        {
            assert ((plug < heap_segment_mem (nseg)) ||
                    (plug > heap_segment_reserved (nseg)));
            //adjust the end of the segment to be the end of the plug
            assert (generation_allocation_pointer (consing_gen)>=
                    heap_segment_mem (nseg));
            assert (generation_allocation_pointer (consing_gen)<=
                    heap_segment_committed (nseg));

            heap_segment_plan_allocated (nseg) =
                generation_allocation_pointer (consing_gen);
            //switch allocation segment
            nseg = heap_segment_next_rw (nseg);
            generation_allocation_segment (consing_gen) = nseg;
            //reset the allocation pointer and limits
            generation_allocation_pointer (consing_gen) =
                heap_segment_mem (nseg);
        }

        set_new_pin_info (m, generation_allocation_pointer (consing_gen));
        dprintf (2, ("pin %Ix b: %Ix->%Ix", plug, brick_of (plug),
            (size_t)(brick_table[brick_of (plug)])));

        generation_allocation_pointer (consing_gen) = plug + len;
        generation_allocation_limit (consing_gen) =
            generation_allocation_pointer (consing_gen);
        //Add the size of the pinned plug to the right pinned allocations
        //find out which gen this pinned plug came from
        int frgn = object_gennum (plug);
        if ((frgn != (int)max_generation) && settings.promotion)
        {
            generation_pinned_allocation_sweep_size ((generation_of (frgn +1))) += len;
        }

    }

    plan_generation_starts (consing_gen);
    print_free_and_plug ("AP");

    {
#ifdef SIMPLE_DPRINTF
        for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
        {
            generation* temp_gen = generation_of (gen_idx);
            dynamic_data* temp_dd = dynamic_data_of (gen_idx);

            int added_pinning_ratio = 0;
            int artificial_pinned_ratio = 0;

            if (dd_pinned_survived_size (temp_dd) != 0)
            {
                added_pinning_ratio = (int)((float)dd_added_pinned_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
                artificial_pinned_ratio = (int)((float)dd_artificial_pinned_survived_size (temp_dd) * 100 / (float)dd_pinned_survived_size (temp_dd));
            }

            size_t padding_size =
#ifdef SHORT_PLUGS
                dd_padding_size (temp_dd);
#else
                0;
#endif //SHORT_PLUGS
            dprintf (1, ("gen%d: %Ix, %Ix(%Id), NON PIN alloc: %Id, pin com: %Id, sweep: %Id, surv: %Id, pinsurv: %Id(%d%% added, %d%% art), np surv: %Id, pad: %Id",
                gen_idx,
                generation_allocation_start (temp_gen),
                generation_plan_allocation_start (temp_gen),
                (size_t)(generation_plan_allocation_start (temp_gen) - generation_allocation_start (temp_gen)),
                generation_allocation_size (temp_gen),
                generation_pinned_allocation_compact_size (temp_gen),
                generation_pinned_allocation_sweep_size (temp_gen),
                dd_survived_size (temp_dd),
                dd_pinned_survived_size (temp_dd),
                added_pinning_ratio,
                artificial_pinned_ratio,
                (dd_survived_size (temp_dd) - dd_pinned_survived_size (temp_dd)),
                padding_size));
        }
#endif //SIMPLE_DPRINTF
    }

    if (settings.condemned_generation == (max_generation - 1 ))
    {
        generation* older_gen = generation_of (settings.condemned_generation + 1);
        size_t rejected_free_space = generation_free_obj_space (older_gen) - r_free_obj_space;
        size_t free_list_allocated = generation_free_list_allocated (older_gen) - r_older_gen_free_list_allocated;
        size_t end_seg_allocated = generation_end_seg_allocated (older_gen) - r_older_gen_end_seg_allocated;
        size_t condemned_allocated = generation_condemned_allocated (older_gen) - r_older_gen_condemned_allocated;

        size_t growth = end_seg_allocated + condemned_allocated;

        if (growth > 0)
        {
            dprintf (1, ("gen2 grew %Id (end seg alloc: %Id, condemned alloc: %Id",
                         growth, end_seg_allocated, condemned_allocated));

            maxgen_size_inc_p = true;
        }
        else
        {
            dprintf (2, ("gen2 didn't grow (end seg alloc: %Id, condemned alloc: %Id, gen1 c alloc: %Id",
                         end_seg_allocated, condemned_allocated,
                         generation_condemned_allocated (generation_of (max_generation - 1))));
        }

        dprintf (1, ("older gen's free alloc: %Id->%Id, seg alloc: %Id->%Id, condemned alloc: %Id->%Id",
                    r_older_gen_free_list_allocated, generation_free_list_allocated (older_gen),
                    r_older_gen_end_seg_allocated, generation_end_seg_allocated (older_gen),
                    r_older_gen_condemned_allocated, generation_condemned_allocated (older_gen)));

        dprintf (1, ("this GC did %Id free list alloc(%Id bytes free space rejected)",
            free_list_allocated, rejected_free_space));

        maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
        maxgen_size_info->free_list_allocated = free_list_allocated;
        maxgen_size_info->free_list_rejected = rejected_free_space;
        maxgen_size_info->end_seg_allocated = end_seg_allocated;
        maxgen_size_info->condemned_allocated = condemned_allocated;
        maxgen_size_info->pinned_allocated = maxgen_pinned_compact_before_advance;
        maxgen_size_info->pinned_allocated_advance = generation_pinned_allocation_compact_size (generation_of (max_generation)) - maxgen_pinned_compact_before_advance;

#ifdef FREE_USAGE_STATS
        int free_list_efficiency = 0;
        if ((free_list_allocated + rejected_free_space) != 0)
            free_list_efficiency = (int)(((float) (free_list_allocated) / (float)(free_list_allocated + rejected_free_space)) * (float)100);

        int running_free_list_efficiency = (int)(generation_allocator_efficiency(older_gen)*100);

        dprintf (1, ("gen%d free list alloc effi: %d%%, current effi: %d%%",
                    older_gen->gen_num,
                    free_list_efficiency, running_free_list_efficiency));

        dprintf (1, ("gen2 free list change"));
        for (int j = 0; j < NUM_GEN_POWER2; j++)
        {
            dprintf (1, ("[h%d][#%Id]: 2^%d: F: %Id->%Id(%Id), P: %Id",
                heap_number,
                settings.gc_index,
                (j + 10), r_older_gen_free_space[j], older_gen->gen_free_spaces[j],
                (ptrdiff_t)(r_older_gen_free_space[j] - older_gen->gen_free_spaces[j]),
                (generation_of(max_generation - 1))->gen_plugs[j]));
        }
#endif //FREE_USAGE_STATS
    }

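    // Measure how much space compaction could reclaim; this feeds the
    // compact-vs-sweep decision below.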
    size_t fragmentation =
        generation_fragmentation (generation_of (condemned_gen_number),
                                  consing_gen,
                                  heap_segment_allocated (ephemeral_heap_segment));

    dprintf (2,("Fragmentation: %Id", fragmentation));
    dprintf (2,("---- End of Plan phase ----"));

#ifdef TIME_GC
    finish = GetCycleCount32();
    plan_time = finish - start;
#endif //TIME_GC

    // We may update write barrier code.  We assume here EE has been suspended if we are on a GC thread.
    assert(IsGCInProgress());

    BOOL should_expand = FALSE;
    BOOL should_compact= FALSE;
    ephemeral_promotion = FALSE;

#ifdef BIT64
    if ((!settings.concurrent) &&
        !provisional_mode_triggered &&
        ((condemned_gen_number < max_generation) &&
         ((settings.gen0_reduction_count > 0) || (settings.entry_memory_load >= 95))))
    {
        dprintf (GTC_LOG, ("gen0 reduction count is %d, condemning %d, mem load %d",
                     settings.gen0_reduction_count,
                     condemned_gen_number,
                     settings.entry_memory_load));
        should_compact = TRUE;

        get_gc_data_per_heap()->set_mechanism (gc_heap_compact,
            ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load));

        if ((condemned_gen_number >= (max_generation - 1)) &&
            dt_low_ephemeral_space_p (tuning_deciding_expansion))
        {
            dprintf (GTC_LOG, ("Not enough space for all ephemeral generations with compaction"));
            should_expand = TRUE;
        }
    }
    else
    {
#endif // BIT64
        should_compact = decide_on_compacting (condemned_gen_number, fragmentation, should_expand);
#ifdef BIT64
    }
#endif // BIT64

#ifdef FEATURE_LOH_COMPACTION
    loh_compacted_p = FALSE;
#endif //FEATURE_LOH_COMPACTION

    if (condemned_gen_number == max_generation)
    {
#ifdef FEATURE_LOH_COMPACTION
        if (settings.loh_compaction)
        {
            if (plan_loh())
            {
                should_compact = TRUE;
                get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
                loh_compacted_p = TRUE;
            }
        }
        else
        {
            if ((heap_number == 0) && (loh_pinned_queue))
            {
                loh_pinned_queue_decay--;

                if (!loh_pinned_queue_decay)
                {
                    delete loh_pinned_queue;
                    loh_pinned_queue = 0;
                }
            }
        }

        if (!loh_compacted_p)
#endif //FEATURE_LOH_COMPACTION
        {
            GCToEEInterface::DiagWalkLOHSurvivors(__this);
            sweep_large_objects();
        }
    }
    else
    {
        settings.loh_compaction = FALSE;
    }

#ifdef MULTIPLE_HEAPS

    new_heap_segment = NULL;

    if (should_compact && should_expand)
        gc_policy = policy_expand;
    else if (should_compact)
        gc_policy = policy_compact;
    else
        gc_policy = policy_sweep;

    //vote for result of should_compact
    dprintf (3, ("Joining for compaction decision"));
    gc_t_join.join(this, gc_join_decide_on_compaction);
    if (gc_t_join.joined())
    {
        //safe place to delete large heap segments
        if (condemned_gen_number == max_generation)
        {
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps [i]->rearrange_large_heap_segments ();
            }
        }

        if (maxgen_size_inc_p && provisional_mode_triggered)
        {
            pm_trigger_full_gc = true;
            dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
        }
        else
        {
            settings.demotion = FALSE;
            int pol_max = policy_sweep;
#ifdef GC_CONFIG_DRIVEN
            BOOL is_compaction_mandatory = FALSE;
#endif //GC_CONFIG_DRIVEN

            int i;
            for (i = 0; i < n_heaps; i++)
            {
                if (pol_max < g_heaps[i]->gc_policy)
                    pol_max = policy_compact;
                // set the demotion flag if any of the heaps has demotion
                if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
                {
                    (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
                    settings.demotion = TRUE;
                }

#ifdef GC_CONFIG_DRIVEN
                if (!is_compaction_mandatory)
                {
                    int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact);
                    if (compact_reason >= 0)
                    {
                        if (gc_heap_compact_reason_mandatory_p[compact_reason])
                            is_compaction_mandatory = TRUE;
                    }
                }
#endif //GC_CONFIG_DRIVEN
            }

#ifdef GC_CONFIG_DRIVEN
            if (!is_compaction_mandatory)
            {
                // If compaction is not mandatory we can feel free to change it to a sweeping GC.
                // Note that we may want to change this to only checking every so often instead of every single GC.
                if (should_do_sweeping_gc (pol_max >= policy_compact))
                {
                    pol_max = policy_sweep;
                }
                else
                {
                    if (pol_max == policy_sweep)
                        pol_max = policy_compact;
                }
            }
#endif //GC_CONFIG_DRIVEN

            for (i = 0; i < n_heaps; i++)
            {
                if (pol_max > g_heaps[i]->gc_policy)
                    g_heaps[i]->gc_policy = pol_max;
                //get the segment while we are serialized
                if (g_heaps[i]->gc_policy == policy_expand)
                {
                    g_heaps[i]->new_heap_segment =
                        g_heaps[i]->soh_get_segment_to_expand();
                    if (!g_heaps[i]->new_heap_segment)
                    {
                        set_expand_in_full_gc (condemned_gen_number);
                        //we are out of memory, cancel the expansion
                        g_heaps[i]->gc_policy = policy_compact;
                    }
                }
            }

            BOOL is_full_compacting_gc = FALSE;

            if ((gc_policy >= policy_compact) && (condemned_gen_number == max_generation))
            {
                full_gc_counts[gc_type_compacting]++;
                is_full_compacting_gc = TRUE;
            }

            for (i = 0; i < n_heaps; i++)
            {
                //copy the card and brick tables
                if (g_gc_card_table!= g_heaps[i]->card_table)
                {
                    g_heaps[i]->copy_brick_card_table();
                }

                if (is_full_compacting_gc)
                {
                    g_heaps[i]->loh_alloc_since_cg = 0;
                }
            }
        }

        //start all threads on the roots.
        dprintf(3, ("Starting all gc threads after compaction decision"));
        gc_t_join.restart();
    }

    //reset the local variable accordingly
    should_compact = (gc_policy >= policy_compact);
    should_expand  = (gc_policy >= policy_expand);

#else //MULTIPLE_HEAPS

    //safe place to delete large heap segments
    if (condemned_gen_number == max_generation)
    {
        rearrange_large_heap_segments ();
    }

    if (maxgen_size_inc_p && provisional_mode_triggered)
    {
        pm_trigger_full_gc = true;
        dprintf (GTC_LOG, ("in PM: maxgen size inc, doing a sweeping gen1 and trigger NGC2"));
    }
    else
    {
        settings.demotion = ((demotion_high >= demotion_low) ? TRUE : FALSE);
        if (settings.demotion)
            get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);

#ifdef GC_CONFIG_DRIVEN
        BOOL is_compaction_mandatory = FALSE;
        int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
        if (compact_reason >= 0)
            is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason];

        if (!is_compaction_mandatory)
        {
            if (should_do_sweeping_gc (should_compact))
                should_compact = FALSE;
            else
                should_compact = TRUE;
        }
#endif //GC_CONFIG_DRIVEN

        if (should_compact && (condemned_gen_number == max_generation))
        {
            full_gc_counts[gc_type_compacting]++;
            loh_alloc_since_cg = 0;
        }
    }
#endif //MULTIPLE_HEAPS

    if (!pm_trigger_full_gc && pm_stress_on && provisional_mode_triggered)
    {
        if ((settings.condemned_generation == (max_generation - 1)) &&
            ((settings.gc_index % 5) == 0))
        {
            pm_trigger_full_gc = true;
        }
    }

    if (settings.condemned_generation == (max_generation - 1))
    {
        if (provisional_mode_triggered)
        {
            if (should_expand)
            {
                should_expand = FALSE;
                dprintf (GTC_LOG, ("h%d in PM cannot expand", heap_number));
            }
        }

        if (pm_trigger_full_gc)
        {
            should_compact = FALSE;
            dprintf (GTC_LOG, ("h%d PM doing sweeping", heap_number));
        }
    }

    if (should_compact)
    {
        dprintf (2,( "**** Doing Compacting GC ****"));

        if (should_expand)
        {
#ifndef MULTIPLE_HEAPS
            heap_segment* new_heap_segment = soh_get_segment_to_expand();
#endif //!MULTIPLE_HEAPS
            if (new_heap_segment)
            {
                consing_gen = expand_heap(condemned_gen_number,
                                          consing_gen,
                                          new_heap_segment);
            }

            // If we couldn't get a new segment, or we reserved one
            // but had no space to commit, we couldn't expand the heap.
            if (ephemeral_heap_segment != new_heap_segment)
            {
                set_expand_in_full_gc (condemned_gen_number);
                should_expand = FALSE;
            }
        }
        generation_allocation_limit (condemned_gen1) =
            generation_allocation_pointer (condemned_gen1);
        if ((condemned_gen_number < max_generation))
        {
            generation_allocator (older_gen)->commit_alloc_list_changes();

            // Fix the allocation area of the older generation
            fix_older_allocation_area (older_gen);
        }
        assert (generation_allocation_segment (consing_gen) ==
                ephemeral_heap_segment);

        GCToEEInterface::DiagWalkSurvivors(__this, true);

        relocate_phase (condemned_gen_number, first_condemned_address);
        compact_phase (condemned_gen_number, first_condemned_address,
                       (!settings.demotion && settings.promotion));
        fix_generation_bounds (condemned_gen_number, consing_gen);
        assert (generation_allocation_limit (youngest_generation) ==
                generation_allocation_pointer (youngest_generation));
        if (condemned_gen_number >= (max_generation -1))
        {
#ifdef MULTIPLE_HEAPS
            // This needs to be serialized just because we have one
            // segment_standby_list/seg_table for all heaps. At a minimum we should change
            // it so that when hoarding is not on we don't need this join, because
            // decommitting memory can take a long time.
            // Must serialize on deleting segments.
            gc_t_join.join(this, gc_join_rearrange_segs_compaction);
            if (gc_t_join.joined())
            {
                for (int i = 0; i < n_heaps; i++)
                {
                    g_heaps[i]->rearrange_heap_segments(TRUE);
                }
                gc_t_join.restart();
            }
#else
            rearrange_heap_segments(TRUE);
#endif //MULTIPLE_HEAPS

            if (should_expand)
            {
                //fix the start_segment for the ephemeral generations
                for (int i = 0; i < max_generation; i++)
                {
                    generation* gen = generation_of (i);
                    generation_start_segment (gen) = ephemeral_heap_segment;
                    generation_allocation_segment (gen) = ephemeral_heap_segment;
                }
            }
        }

        {
#ifdef FEATURE_PREMORTEM_FINALIZATION
            finalize_queue->UpdatePromotedGenerations (condemned_gen_number,
                                                       (!settings.demotion && settings.promotion));
#endif // FEATURE_PREMORTEM_FINALIZATION

#ifdef MULTIPLE_HEAPS
            dprintf(3, ("Joining after end of compaction"));
            gc_t_join.join(this, gc_join_adjust_handle_age_compact);
            if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
            {
#ifdef MULTIPLE_HEAPS
                //join all threads to make sure they are synchronized
                dprintf(3, ("Restarting after Promotion granted"));
                gc_t_join.restart();
#endif //MULTIPLE_HEAPS
            }

            ScanContext sc;
            sc.thread_number = heap_number;
            sc.promotion = FALSE;
            sc.concurrent = FALSE;
            // the new generation bounds are set, so we can call this now
            if (settings.promotion && !settings.demotion)
            {
                dprintf (2, ("Promoting EE roots for gen %d",
                             condemned_gen_number));
                GCScan::GcPromotionsGranted(condemned_gen_number,
                                                max_generation, &sc);
            }
            else if (settings.demotion)
            {
                dprintf (2, ("Demoting EE roots for gen %d",
                             condemned_gen_number));
                GCScan::GcDemote (condemned_gen_number, max_generation, &sc);
            }
        }

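        // After compaction, the space that was set aside in front of each pinned plug
        // (its pinned_len) is free; turn each such run into a free object, fix the
        // bricks it covers, and thread it onto the free list of the generation it now
        // belongs to.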
        {
            gen0_big_free_spaces = 0;

            reset_pinned_queue_bos();
            unsigned int  gen_number = min (max_generation, 1 + condemned_gen_number);
            generation*  gen = generation_of (gen_number);
            uint8_t*  low = generation_allocation_start (generation_of (gen_number-1));
            uint8_t*  high =  heap_segment_allocated (ephemeral_heap_segment);

            while (!pinned_plug_que_empty_p())
            {
                mark*  m = pinned_plug_of (deque_pinned_plug());
                size_t len = pinned_len (m);
                uint8_t*  arr = (pinned_plug (m) - len);
                dprintf(3,("free [%Ix %Ix[ pin",
                            (size_t)arr, (size_t)arr + len));
                if (len != 0)
                {
                    assert (len >= Align (min_obj_size));
                    make_unused_array (arr, len);
                    // fix fully contained bricks + first one
                    // if the array goes beyond the first brick
                    size_t start_brick = brick_of (arr);
                    size_t end_brick = brick_of (arr + len);
                    if (end_brick != start_brick)
                    {
                        dprintf (3,
                                    ("Fixing bricks [%Ix, %Ix[ to point to unused array %Ix",
                                    start_brick, end_brick, (size_t)arr));
                        set_brick (start_brick,
                                    arr - brick_address (start_brick));
                        size_t brick = start_brick+1;
                        while (brick < end_brick)
                        {
                            set_brick (brick, start_brick - brick);
                            brick++;
                        }
                    }

                    // When we take an old segment to make the new
                    // ephemeral segment, we can have a bunch of
                    // pinned plugs out of order going to the new ephemeral seg
                    // and then the next plugs going back to max_generation.
                    if ((heap_segment_mem (ephemeral_heap_segment) <= arr) &&
                        (heap_segment_reserved (ephemeral_heap_segment) > arr))
                    {

                        while ((low <= arr) && (high > arr))
                        {
                            gen_number--;
                            assert ((gen_number >= 1) || (demotion_low != MAX_PTR) ||
                                    settings.demotion || !settings.promotion);
                            dprintf (3, ("new free list generation %d", gen_number));

                            gen = generation_of (gen_number);
                            if (gen_number >= 1)
                                low = generation_allocation_start (generation_of (gen_number-1));
                            else
                                low = high;
                        }
                    }
                    else
                    {
                        dprintf (3, ("new free list generation %d", max_generation));
                        gen_number = max_generation;
                        gen = generation_of (gen_number);
                    }

                    dprintf(3,("threading it into generation %d", gen_number));
                    thread_gap (arr, len, gen);
                    add_gen_free (gen_number, len);
                }
            }
        }

#ifdef _DEBUG
        for (int x = 0; x <= max_generation; x++)
        {
            assert (generation_allocation_start (generation_of (x)));
        }
#endif //_DEBUG

        if (!settings.demotion && settings.promotion)
        {
            //clear card for generation 1. generation 0 is empty
            clear_card_for_addresses (
                generation_allocation_start (generation_of (1)),
                generation_allocation_start (generation_of (0)));
        }
        if (settings.promotion && !settings.demotion)
        {
            uint8_t* start = generation_allocation_start (youngest_generation);
            MAYBE_UNUSED_VAR(start);
            assert (heap_segment_allocated (ephemeral_heap_segment) ==
                    (start + Align (size (start))));
        }
    }
    else
    {
        //force promotion for sweep
        settings.promotion = TRUE;
        settings.compaction = FALSE;

        ScanContext sc;
        sc.thread_number = heap_number;
        sc.promotion = FALSE;
        sc.concurrent = FALSE;

        dprintf (2, ("**** Doing Mark and Sweep GC****"));

        if ((condemned_gen_number < max_generation))
        {
            generation_allocator (older_gen)->copy_from_alloc_list (r_free_list);
            generation_free_list_space (older_gen) = r_free_list_space;
            generation_free_obj_space (older_gen) = r_free_obj_space;
            generation_free_list_allocated (older_gen) = r_older_gen_free_list_allocated;
            generation_end_seg_allocated (older_gen) = r_older_gen_end_seg_allocated;
            generation_condemned_allocated (older_gen) = r_older_gen_condemned_allocated;
            generation_sweep_allocated (older_gen) += dd_survived_size (dynamic_data_of (condemned_gen_number));
            generation_allocation_limit (older_gen) = r_allocation_limit;
            generation_allocation_pointer (older_gen) = r_allocation_pointer;
            generation_allocation_context_start_region (older_gen) = r_allocation_start_region;
            generation_allocation_segment (older_gen) = r_allocation_segment;
        }

        if ((condemned_gen_number < max_generation))
        {
            // Fix the allocation area of the older generation
            fix_older_allocation_area (older_gen);
        }

        GCToEEInterface::DiagWalkSurvivors(__this, false);

        gen0_big_free_spaces = 0;
        make_free_lists (condemned_gen_number);
        recover_saved_pinned_info();

#ifdef FEATURE_PREMORTEM_FINALIZATION
        finalize_queue->UpdatePromotedGenerations (condemned_gen_number, TRUE);
#endif // FEATURE_PREMORTEM_FINALIZATION
// MTHTS: leave single thread for HT processing on plan_phase
#ifdef MULTIPLE_HEAPS
        dprintf(3, ("Joining after end of sweep"));
        gc_t_join.join(this, gc_join_adjust_handle_age_sweep);
        if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
            GCScan::GcPromotionsGranted(condemned_gen_number,
                                            max_generation, &sc);
            if (condemned_gen_number >= (max_generation -1))
            {
#ifdef MULTIPLE_HEAPS
                for (int i = 0; i < n_heaps; i++)
                {
                    g_heaps[i]->rearrange_heap_segments(FALSE);
                }
#else
                rearrange_heap_segments(FALSE);
#endif //MULTIPLE_HEAPS
            }

#ifdef MULTIPLE_HEAPS
            //join all threads to make sure they are synchronized
            dprintf(3, ("Restarting after Promotion granted"));
            gc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }

#ifdef _DEBUG
        for (int x = 0; x <= max_generation; x++)
        {
            assert (generation_allocation_start (generation_of (x)));
        }
#endif //_DEBUG

        //clear card for generation 1. generation 0 is empty
        clear_card_for_addresses (
            generation_allocation_start (generation_of (1)),
            generation_allocation_start (generation_of (0)));
        assert ((heap_segment_allocated (ephemeral_heap_segment) ==
                 (generation_allocation_start (youngest_generation) +
                  Align (min_obj_size))));
    }

    //verify_partial();
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif //_PREFAST_


/*****************************
Called after compact phase to fix all generation gaps
********************************/
void gc_heap::fix_generation_bounds (int condemned_gen_number,
                                     generation* consing_gen)
{
    UNREFERENCED_PARAMETER(consing_gen);

    assert (generation_allocation_segment (consing_gen) ==
            ephemeral_heap_segment);

    //assign the planned allocation start to the generation
    int gen_number = condemned_gen_number;
    int bottom_gen = 0;

    while (gen_number >= bottom_gen)
    {
        generation*  gen = generation_of (gen_number);
        dprintf(3,("Fixing generation pointers for %Ix", gen_number));
        if ((gen_number < max_generation) && ephemeral_promotion)
        {
            make_unused_array (saved_ephemeral_plan_start[gen_number],
                               saved_ephemeral_plan_start_size[gen_number]);
        }
        reset_allocation_pointers (gen, generation_plan_allocation_start (gen));
        make_unused_array (generation_allocation_start (gen), generation_plan_allocation_start_size (gen));
        dprintf(3,(" start %Ix", (size_t)generation_allocation_start (gen)));
        gen_number--;
    }
#ifdef MULTIPLE_HEAPS
    if (ephemeral_promotion)
    {
        //we are creating a generation fault. set the cards.
        // and we are only doing this for multiple heaps because in the single heap scenario the
        // new ephemeral generations will be empty and there'll be no need to set cards for the
        // old ephemeral generations that got promoted into max_generation.
        ptrdiff_t delta = 0;
#ifdef SEG_MAPPING_TABLE
        heap_segment* old_ephemeral_seg = seg_mapping_table_segment_of (saved_ephemeral_plan_start[max_generation-1]);
#else //SEG_MAPPING_TABLE
        heap_segment* old_ephemeral_seg = segment_of (saved_ephemeral_plan_start[max_generation-1], delta);
#endif //SEG_MAPPING_TABLE

        assert (in_range_for_segment (saved_ephemeral_plan_start[max_generation-1], old_ephemeral_seg));
        size_t end_card = card_of (align_on_card (heap_segment_plan_allocated (old_ephemeral_seg)));
        size_t card = card_of (saved_ephemeral_plan_start[max_generation-1]);
        while (card != end_card)
        {
            set_card (card);
            card++;
        }
    }
#endif //MULTIPLE_HEAPS
    {
        alloc_allocated = heap_segment_plan_allocated(ephemeral_heap_segment);
        //reset the allocated size
        uint8_t* start = generation_allocation_start (youngest_generation);
        MAYBE_UNUSED_VAR(start);
        if (settings.promotion && !settings.demotion)
        {
            assert ((start + Align (size (start))) ==
                    heap_segment_plan_allocated(ephemeral_heap_segment));
        }

        heap_segment_allocated(ephemeral_heap_segment)=
            heap_segment_plan_allocated(ephemeral_heap_segment);
    }
}

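// Returns the address limit that make_free_lists/make_free_list_in_brick use when
// deciding where free space for gen_number ends: either the end of the ephemeral
// segment or the allocation start of a younger generation. With promotion on, the
// boundary shifts down one generation because the survivors that were in
// gen_number-1 now belong to gen_number.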
uint8_t* gc_heap::generation_limit (int gen_number)
{
    if (settings.promotion)
    {
        if (gen_number <= 1)
            return heap_segment_reserved (ephemeral_heap_segment);
        else
            return generation_allocation_start (generation_of ((gen_number - 2)));
    }
    else
    {
        if (gen_number <= 0)
            return heap_segment_reserved (ephemeral_heap_segment);
        else
            return generation_allocation_start (generation_of ((gen_number - 1)));
    }
}

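// Make sure the end of the ephemeral segment has enough committed space for the
// min-object generation start gaps that the sweep path will allocate there (one per
// condemned generation, via allocate_at_end).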
BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number)
{
    uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
    size_t size = Align (min_obj_size)*(condemned_gen_number+1);
    assert ((start + size) <=
            heap_segment_reserved (ephemeral_heap_segment));
    if ((start + size) >
        heap_segment_committed (ephemeral_heap_segment))
    {
        if (!grow_heap_segment (ephemeral_heap_segment, start + size))
        {
            return FALSE;
        }
    }
    return TRUE;
}

uint8_t* gc_heap::allocate_at_end (size_t size)
{
    uint8_t* start = heap_segment_allocated (ephemeral_heap_segment);
    size = Align (size);
    uint8_t* result = start;
    // only called to allocate a min obj so can't overflow here.
    assert ((start + size) <=
            heap_segment_reserved (ephemeral_heap_segment));
    //ensure_gap_allocation took care of it
    assert ((start + size) <=
            heap_segment_committed (ephemeral_heap_segment));
    heap_segment_allocated (ephemeral_heap_segment) += size;
    return result;
}


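// Sweep path: walk the brick table of the condemned region, thread the gaps between
// surviving plugs onto the free lists of the appropriate generations, and then
// allocate the min-object generation start gaps for the younger generations at the
// end of the ephemeral segment.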
void gc_heap::make_free_lists (int condemned_gen_number)
{
#ifdef TIME_GC
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#endif //TIME_GC

    //Promotion has to happen in sweep case.
    assert (settings.promotion);

    generation* condemned_gen = generation_of (condemned_gen_number);
    uint8_t* start_address = generation_allocation_start (condemned_gen);

    size_t  current_brick = brick_of (start_address);
    heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));

    PREFIX_ASSUME(current_heap_segment != NULL);

    uint8_t*  end_address = heap_segment_allocated (current_heap_segment);
    size_t  end_brick = brick_of (end_address-1);
    make_free_args args;
    args.free_list_gen_number = min (max_generation, 1 + condemned_gen_number);
    args.current_gen_limit = (((condemned_gen_number == max_generation)) ?
                              MAX_PTR :
                              (generation_limit (args.free_list_gen_number)));
    args.free_list_gen = generation_of (args.free_list_gen_number);
    args.highest_plug = 0;

    if ((start_address < end_address) ||
        (condemned_gen_number == max_generation))
    {
        while (1)
        {
            if ((current_brick > end_brick))
            {
                if (args.current_gen_limit == MAX_PTR)
                {
                    //We had an empty segment
                    //need to allocate the generation start

                    generation* gen = generation_of (max_generation);

                    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));

                    PREFIX_ASSUME(start_seg != NULL);

                    uint8_t* gap = heap_segment_mem (start_seg);

                    generation_allocation_start (gen) = gap;
                    heap_segment_allocated (start_seg) = gap + Align (min_obj_size);
                    make_unused_array (gap, Align (min_obj_size));
                    reset_allocation_pointers (gen, gap);
                    dprintf (3, ("Start segment empty, fixing generation start of %d to: %Ix",
                                 max_generation, (size_t)gap));
                    args.current_gen_limit = generation_limit (args.free_list_gen_number);
                }
                if (heap_segment_next_rw (current_heap_segment))
                {
                    current_heap_segment = heap_segment_next_rw (current_heap_segment);
                    current_brick = brick_of (heap_segment_mem (current_heap_segment));
                    end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);

                    continue;
                }
                else
                {
                    break;
                }
            }
            {
                int brick_entry =  brick_table [ current_brick ];
                if ((brick_entry >= 0))
                {
                    make_free_list_in_brick (brick_address (current_brick) + brick_entry-1, &args);
                    dprintf(3,("Fixing brick entry %Ix to %Ix",
                               current_brick, (size_t)args.highest_plug));
                    set_brick (current_brick,
                               (args.highest_plug - brick_address (current_brick)));
                }
                else
                {
                    if ((brick_entry > -32768))
                    {

#ifdef _DEBUG
                        ptrdiff_t offset = brick_of (args.highest_plug) - current_brick;
                        if ((brick_entry != -32767) && (! ((offset == brick_entry))))
                        {
                            assert ((brick_entry == -1));
                        }
#endif //_DEBUG
                        //init to -1 for faster find_first_object
                        set_brick (current_brick, -1);
                    }
                }
            }
            current_brick++;
        }
    }
    {
        int bottom_gen = 0;
        args.free_list_gen_number--;
        while (args.free_list_gen_number >= bottom_gen)
        {
            uint8_t*  gap = 0;
            generation* gen2 = generation_of (args.free_list_gen_number);
            gap = allocate_at_end (Align(min_obj_size));
            generation_allocation_start (gen2) = gap;
            reset_allocation_pointers (gen2, gap);
            dprintf(3,("Fixing generation start of %d to: %Ix",
                       args.free_list_gen_number, (size_t)gap));
            PREFIX_ASSUME(gap != NULL);
            make_unused_array (gap, Align (min_obj_size));

            args.free_list_gen_number--;
        }

        //reset the allocated size
        uint8_t* start2 = generation_allocation_start (youngest_generation);
        alloc_allocated = start2 + Align (size (start2));
    }

#ifdef TIME_GC
    finish = GetCycleCount32();
    sweep_time = finish - start;
#endif //TIME_GC
}

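// In-order walk of the plug tree rooted in a brick. The gap in front of each plug is
// threaded onto the current free list generation; when a plug crosses the current
// generation limit, the generation start is planted in the gap and we switch to the
// next (younger) generation.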
void gc_heap::make_free_list_in_brick (uint8_t* tree, make_free_args* args)
{
    assert ((tree != NULL));
    {
        int  right_node = node_right_child (tree);
        int left_node = node_left_child (tree);
        args->highest_plug = 0;
        if (! (0 == tree))
        {
            if (! (0 == left_node))
            {
                make_free_list_in_brick (tree + left_node, args);

            }
            {
                uint8_t*  plug = tree;
                size_t  gap_size = node_gap_size (tree);
                uint8_t*  gap = (plug - gap_size);
                dprintf (3,("Making free list %Ix len %d in %d",
                //dprintf (3,("F: %Ix len %Ix in %d",
                        (size_t)gap, gap_size, args->free_list_gen_number));
                args->highest_plug = tree;
#ifdef SHORT_PLUGS
                if (is_plug_padded (plug))
                {
                    dprintf (3, ("%Ix padded", plug));
                    clear_plug_padded (plug);
                }
#endif //SHORT_PLUGS
            gen_crossing:
                {
                    if ((args->current_gen_limit == MAX_PTR) ||
                        ((plug >= args->current_gen_limit) &&
                         ephemeral_pointer_p (plug)))
                    {
                        dprintf(3,(" Crossing Generation boundary at %Ix",
                               (size_t)args->current_gen_limit));
                        if (!(args->current_gen_limit == MAX_PTR))
                        {
                            args->free_list_gen_number--;
                            args->free_list_gen = generation_of (args->free_list_gen_number);
                        }
                        dprintf(3,( " Fixing generation start of %d to: %Ix",
                                args->free_list_gen_number, (size_t)gap));

                        reset_allocation_pointers (args->free_list_gen, gap);
                        args->current_gen_limit = generation_limit (args->free_list_gen_number);

                        if ((gap_size >= (2*Align (min_obj_size))))
                        {
                            dprintf(3,(" Splitting the gap in two %Id left",
                                   gap_size));
                            make_unused_array (gap, Align(min_obj_size));
                            gap_size = (gap_size - Align(min_obj_size));
                            gap = (gap + Align(min_obj_size));
                        }
                        else
                        {
                            make_unused_array (gap, gap_size);
                            gap_size = 0;
                        }
                        goto gen_crossing;
                    }
                }

                thread_gap (gap, gap_size, args->free_list_gen);
                add_gen_free (args->free_list_gen->gen_num, gap_size);
            }
            if (! (0 == right_node))
            {
                make_free_list_in_brick (tree + right_node, args);
            }
        }
    }
}

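// Thread a run of free space onto gen's free list. Runs smaller than min_free_list
// are not worth allocating from and are only counted as free object space.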
void gc_heap::thread_gap (uint8_t* gap_start, size_t size, generation*  gen)
{
    assert (generation_allocation_start (gen));
    if ((size > 0))
    {
        if ((gen->gen_num == 0) && (size > CLR_SIZE))
        {
            gen0_big_free_spaces += size;
        }

        assert ((heap_segment_rw (generation_start_segment (gen))!=
                 ephemeral_heap_segment) ||
                (gap_start > generation_allocation_start (gen)));
        // The beginning of a segment gap is not aligned
        assert (size >= Align (min_obj_size));
        make_unused_array (gap_start, size,
                          (!settings.concurrent && (gen != youngest_generation)),
                          (gen->gen_num == max_generation));
        dprintf (3, ("fr: [%Ix, %Ix[", (size_t)gap_start, (size_t)gap_start+size));

        if ((size >= min_free_list))
        {
            generation_free_list_space (gen) += size;
            generation_allocator (gen)->thread_item (gap_start, size);
        }
        else
        {
            generation_free_obj_space (gen) += size;
        }
    }
}

void gc_heap::loh_thread_gap_front (uint8_t* gap_start, size_t size, generation*  gen)
{
    assert (generation_allocation_start (gen));
    if (size >= min_free_list)
    {
        generation_free_list_space (gen) += size;
        generation_allocator (gen)->thread_item_front (gap_start, size);
    }
}

void gc_heap::make_unused_array (uint8_t* x, size_t size, BOOL clearp, BOOL resetp)
{
    dprintf (3, ("Making unused array [%Ix, %Ix[",
        (size_t)x, (size_t)(x+size)));
    assert (size >= Align (min_obj_size));

//#if defined (VERIFY_HEAP) && defined (BACKGROUND_GC)
//    check_batch_mark_array_bits (x, x+size);
//#endif //VERIFY_HEAP && BACKGROUND_GC

    if (resetp)
    {
#ifdef BGC_SERVO_TUNING
        // Don't do this for servo tuning because it makes it even harder to regulate WS.
        if (!(bgc_tuning::enable_fl_tuning && bgc_tuning::fl_tuning_triggered))
#endif //BGC_SERVO_TUNING
        {
            reset_memory (x, size);
        }
    }
    ((CObjectHeader*)x)->SetFree(size);

#ifdef BIT64

#if BIGENDIAN
#error "This won't work on big endian platforms"
#endif

    size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;

    if (size_as_object < size)
    {
        //
        // If the size is more than 4GB, we need to create multiple objects because
        // Array::m_NumComponents is a uint32_t and the high 32 bits of the unused array
        // size are ignored in the regular object size computation.
        //
        uint8_t * tmp = x + size_as_object;
        size_t remaining_size = size - size_as_object;

        while (remaining_size > UINT32_MAX)
        {
            // Make sure that there will be at least Align(min_obj_size) left
            size_t current_size = UINT32_MAX - get_alignment_constant (FALSE)
                - Align (min_obj_size, get_alignment_constant (FALSE));

            ((CObjectHeader*)tmp)->SetFree(current_size);

            remaining_size -= current_size;
            tmp += current_size;
        }

        ((CObjectHeader*)tmp)->SetFree(remaining_size);
    }
#endif

    if (clearp)
        clear_card_for_addresses (x, x + Align(size));
}

// Clear memory set by make_unused_array.
void gc_heap::clear_unused_array (uint8_t* x, size_t size)
{
    // Also clear the sync block
    *(((PTR_PTR)x)-1) = 0;

    ((CObjectHeader*)x)->UnsetFree();

#ifdef BIT64

#if BIGENDIAN
#error "This won't work on big endian platforms"
#endif

    // The memory could have been cleared in the meantime. We have to mirror the algorithm
    // from make_unused_array since we cannot depend on the object sizes in memory.
    size_t size_as_object = (uint32_t)(size - free_object_base_size) + free_object_base_size;

    if (size_as_object < size)
    {
        uint8_t * tmp = x + size_as_object;
        size_t remaining_size = size - size_as_object;

        while (remaining_size > UINT32_MAX)
        {
            size_t current_size = UINT32_MAX - get_alignment_constant (FALSE)
                - Align (min_obj_size, get_alignment_constant (FALSE));

            ((CObjectHeader*)tmp)->UnsetFree();

            remaining_size -= current_size;
            tmp += current_size;
        }

        ((CObjectHeader*)tmp)->UnsetFree();
    }
#else
    UNREFERENCED_PARAMETER(size);
#endif
}

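// Find the plug in the brick's tree that governs old_address, i.e. the plug with the
// highest address that is <= old_address. If every plug in the tree is above
// old_address, the lowest plug is returned and the caller compensates (see
// relocate_address). For example, with plugs at 0x1000, 0x2000 and 0x3000, a lookup
// of 0x2500 returns the node for 0x2000.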
inline
uint8_t* tree_search (uint8_t* tree, uint8_t* old_address)
{
    uint8_t* candidate = 0;
    int cn;
    while (1)
    {
        if (tree < old_address)
        {
            if ((cn = node_right_child (tree)) != 0)
            {
                assert (candidate < tree);
                candidate = tree;
                tree = tree + cn;
                Prefetch (tree - 8);
                continue;
            }
            else
                break;
        }
        else if (tree > old_address)
        {
            if ((cn = node_left_child (tree)) != 0)
            {
                tree = tree + cn;
                Prefetch (tree - 8);
                continue;
            }
            else
                break;
        } else
            break;
    }
    if (tree <= old_address)
        return tree;
    else if (candidate)
        return candidate;
    else
        return tree;
}

#ifdef FEATURE_BASICFREEZE
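// An object is frozen if it lives on a read-only (frozen) segment.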
bool gc_heap::frozen_object_p (Object* obj)
{
#ifdef MULTIPLE_HEAPS
#ifdef SEG_MAPPING_TABLE
    heap_segment* pSegment = seg_mapping_table_segment_of((uint8_t*)obj);
#else
    ptrdiff_t delta = 0;
    heap_segment* pSegment = segment_of ((uint8_t*)obj, delta);
#endif
#else //MULTIPLE_HEAPS
    heap_segment* pSegment = gc_heap::find_segment ((uint8_t*)obj, FALSE);
    _ASSERTE(pSegment);
#endif //MULTIPLE_HEAPS

    return heap_segment_read_only_p(pSegment);
}
#endif // FEATURE_BASICFREEZE

#ifdef FEATURE_REDHAWK
// TODO: this was added on RH, we have not done perf runs to see if this is the right
// thing to do for other versions of the CLR.
inline
#endif // FEATURE_REDHAWK
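// Compute the post-plan location of *pold_address: find its brick, follow negative
// brick entries back to the brick that holds the governing plug tree, then use
// tree_search to locate the plug covering the old address and apply that plug's
// relocation distance. Addresses outside the condemned range are left untouched; a
// zero brick entry falls through to the LOH relocation path when LOH compaction
// happened this GC.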
void gc_heap::relocate_address (uint8_t** pold_address THREAD_NUMBER_DCL)
{
    uint8_t* old_address = *pold_address;
    if (!((old_address >= gc_low) && (old_address < gc_high)))
#ifdef MULTIPLE_HEAPS
    {
        UNREFERENCED_PARAMETER(thread);
        if (old_address == 0)
            return;
        gc_heap* hp = heap_of (old_address);
        if ((hp == this) ||
            !((old_address >= hp->gc_low) && (old_address < hp->gc_high)))
            return;
    }
#else //MULTIPLE_HEAPS
        return ;
#endif //MULTIPLE_HEAPS
    // delta translates old_address into address_gc (old_address);
    size_t  brick = brick_of (old_address);
    int    brick_entry =  brick_table [ brick ];
    uint8_t*  new_address = old_address;
    if (! ((brick_entry == 0)))
    {
    retry:
        {
            while (brick_entry < 0)
            {
                brick = (brick + brick_entry);
                brick_entry =  brick_table [ brick ];
            }
            uint8_t* old_loc = old_address;

            uint8_t* node = tree_search ((brick_address (brick) + brick_entry-1),
                                      old_loc);
            if ((node <= old_loc))
                new_address = (old_address + node_relocation_distance (node));
            else
            {
                if (node_left_p (node))
                {
                    dprintf(3,(" L: %Ix", (size_t)node));
                    new_address = (old_address +
                                   (node_relocation_distance (node) +
                                    node_gap_size (node)));
                }
                else
                {
                    brick = brick - 1;
                    brick_entry =  brick_table [ brick ];
                    goto retry;
                }
            }
        }

        *pold_address = new_address;
        return;
    }

#ifdef FEATURE_LOH_COMPACTION
    if (loh_compacted_p
#ifdef FEATURE_BASICFREEZE
        && !frozen_object_p((Object*)old_address)
#endif // FEATURE_BASICFREEZE
        )
    {
        *pold_address = old_address + loh_node_relocation_distance (old_address);
    }
    else
#endif //FEATURE_LOH_COMPACTION
    {
        *pold_address = new_address;
    }
}

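// For collectible classes the type is kept alive by a managed loader allocator
// object; if that object is demoted we may need to set a card on the referencing
// object, just like for any other cross-generation reference.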
inline void
gc_heap::check_class_object_demotion (uint8_t* obj)
{
#ifdef COLLECTIBLE_CLASS
    if (is_collectible(obj))
    {
        check_class_object_demotion_internal (obj);
    }
#else
    UNREFERENCED_PARAMETER(obj);
#endif //COLLECTIBLE_CLASS
}

#ifdef COLLECTIBLE_CLASS
NOINLINE void
gc_heap::check_class_object_demotion_internal (uint8_t* obj)
{
    if (settings.demotion)
    {
#ifdef MULTIPLE_HEAPS
        // We set the card without checking the demotion range 'cause at this point
        // the handle that points to the loader allocator object may or may not have
        // been relocated by other GC threads.
        set_card (card_of (obj));
#else
        THREAD_FROM_HEAP;
        uint8_t* class_obj = get_class_object (obj);
        dprintf (3, ("%Ix: got classobj %Ix", obj, class_obj));
        uint8_t* temp_class_obj = class_obj;
        uint8_t** temp = &temp_class_obj;
        relocate_address (temp THREAD_NUMBER_ARG);

        check_demotion_helper (temp, obj);
#endif //MULTIPLE_HEAPS
    }
}

#endif //COLLECTIBLE_CLASS

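// If the (already relocated) reference points into the demoted range, the parent may
// now hold an older-to-younger pointer the card table doesn't know about, so set the
// parent's card.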
inline void
gc_heap::check_demotion_helper (uint8_t** pval, uint8_t* parent_obj)
{
    // detect if we are demoting an object
    if ((*pval < demotion_high) &&
        (*pval >= demotion_low))
    {
        dprintf(3, ("setting card %Ix:%Ix",
                    card_of((uint8_t*)pval),
                    (size_t)pval));

        set_card (card_of (parent_obj));
    }
#ifdef MULTIPLE_HEAPS
    else if (settings.demotion)
    {
        dprintf (4, ("Demotion active, computing heap_of object"));
        gc_heap* hp = heap_of (*pval);
        if ((*pval < hp->demotion_high) &&
            (*pval >= hp->demotion_low))
        {
            dprintf(3, ("setting card %Ix:%Ix",
                        card_of((uint8_t*)pval),
                        (size_t)pval));

            set_card (card_of (parent_obj));
        }
    }
#endif //MULTIPLE_HEAPS
}

inline void
gc_heap::reloc_survivor_helper (uint8_t** pval)
{
    THREAD_FROM_HEAP;
    relocate_address (pval THREAD_NUMBER_ARG);

    check_demotion_helper (pval, (uint8_t*)pval);
}

inline void
gc_heap::relocate_obj_helper (uint8_t* x, size_t s)
{
    THREAD_FROM_HEAP;
    if (contain_pointers (x))
    {
        dprintf (3, ("$%Ix$", (size_t)x));

        go_through_object_nostart (method_table(x), x, s, pval,
                            {
                                uint8_t* child = *pval;
                                reloc_survivor_helper (pval);
                                if (child)
                                {
                                    dprintf (3, ("%Ix->%Ix->%Ix", (uint8_t*)pval, child, *pval));
                                }
                            });

    }
    check_class_object_demotion (x);
}

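// Relocate a reference that lives in the saved copy of an overwritten plug tail:
// address_to_reloc points at the saved slot that gets relocated, while address_to_set_card is
// the slot's real location in the heap, which is where the card is set if the relocated target
// falls into a demotion range.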
inline
void gc_heap::reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc)
{
    THREAD_FROM_HEAP;

    uint8_t* old_val = (address_to_reloc ? *address_to_reloc : 0);
    relocate_address (address_to_reloc THREAD_NUMBER_ARG);
    if (address_to_reloc)
    {
        dprintf (3, ("SR %Ix: %Ix->%Ix", (uint8_t*)address_to_reloc, old_val, *address_to_reloc));
    }

    //check_demotion_helper (current_saved_info_to_relocate, (uint8_t*)pval);
    uint8_t* relocated_addr = *address_to_reloc;
    if ((relocated_addr < demotion_high) &&
        (relocated_addr >= demotion_low))
    {
        dprintf (3, ("set card for location %Ix(%Ix)",
                    (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));

        set_card (card_of ((uint8_t*)address_to_set_card));
    }
#ifdef MULTIPLE_HEAPS
    else if (settings.demotion)
    {
        gc_heap* hp = heap_of (relocated_addr);
        if ((relocated_addr < hp->demotion_high) &&
            (relocated_addr >= hp->demotion_low))
        {
            dprintf (3, ("%Ix on h%d, set card for location %Ix(%Ix)",
                        relocated_addr, hp->heap_number, (size_t)address_to_set_card, card_of((uint8_t*)address_to_set_card)));

            set_card (card_of ((uint8_t*)address_to_set_card));
        }
    }
#endif //MULTIPLE_HEAPS
}

void gc_heap::relocate_pre_plug_info (mark* pinned_plug_entry)
{
    THREAD_FROM_HEAP;
    uint8_t* plug = pinned_plug (pinned_plug_entry);
    uint8_t* pre_plug_start = plug - sizeof (plug_and_gap);
    // Note that we need to add one ptr size here otherwise we may not be able to find the relocated
    // address. Consider this scenario:
    // gen1 start | 3-ptr sized NP | PP
    // 0          | 0x18           | 0x30
    // If we are asking for the reloc address of 0x10 we will AV in relocate_address because
    // the first plug we saw in the brick is 0x18 which means 0x10 will cause us to go back a brick
    // which is 0, and then we'll AV in tree_search when we try to do node_right_child (tree).
    pre_plug_start += sizeof (uint8_t*);
    uint8_t** old_address = &pre_plug_start;

    uint8_t* old_val = (old_address ? *old_address : 0);
    relocate_address (old_address THREAD_NUMBER_ARG);
    if (old_address)
    {
        dprintf (3, ("PreR %Ix: %Ix->%Ix, set reloc: %Ix",
            (uint8_t*)old_address, old_val, *old_address, (pre_plug_start - sizeof (uint8_t*))));
    }

    pinned_plug_entry->set_pre_plug_info_reloc_start (pre_plug_start - sizeof (uint8_t*));
}

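// Relocate references in an object x that reaches the end of a shortened plug. The tail of the
// plug was overwritten by the adjacent pinned plug's gap/reloc info and saved off in the pinned
// plug entry, so references located at or past 'end' are read from and relocated in the saved
// copy (pre plug info when this plug precedes the pin, post plug info when the plug is the pin
// itself) instead of the overwritten memory.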
inline
void gc_heap::relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned)
{
    THREAD_FROM_HEAP;
    uint8_t* plug = pinned_plug (pinned_plug_entry);

    if (!is_pinned)
    {
        //// Temporary - we just wanna make sure we are doing things right when padding is needed.
        //if ((x + s) < plug)
        //{
        //    dprintf (3, ("obj %Ix needed padding: end %Ix is %d bytes from pinned obj %Ix",
        //        x, (x + s), (plug- (x + s)), plug));
        //    GCToOSInterface::DebugBreak();
        //}

        relocate_pre_plug_info (pinned_plug_entry);
    }

    verify_pins_with_post_plug_info("after relocate_pre_plug_info");

    uint8_t* saved_plug_info_start = 0;
    uint8_t** saved_info_to_relocate = 0;

    if (is_pinned)
    {
        saved_plug_info_start = (uint8_t*)(pinned_plug_entry->get_post_plug_info_start());
        saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
    }
    else
    {
        saved_plug_info_start = (plug - sizeof (plug_and_gap));
        saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
    }

    uint8_t** current_saved_info_to_relocate = 0;
    uint8_t* child = 0;

    dprintf (3, ("x: %Ix, pp: %Ix, end: %Ix", x, plug, end));

    if (contain_pointers (x))
    {
        dprintf (3,("$%Ix$", (size_t)x));

        go_through_object_nostart (method_table(x), x, s, pval,
        {
            dprintf (3, ("obj %Ix, member: %Ix->%Ix", x, (uint8_t*)pval, *pval));

            if ((uint8_t*)pval >= end)
            {
                current_saved_info_to_relocate = saved_info_to_relocate + ((uint8_t*)pval - saved_plug_info_start) / sizeof (uint8_t**);
                child = *current_saved_info_to_relocate;
                reloc_ref_in_shortened_obj (pval, current_saved_info_to_relocate);
                dprintf (3, ("last part: R-%Ix(saved: %Ix)->%Ix ->%Ix",
                    (uint8_t*)pval, current_saved_info_to_relocate, child, *current_saved_info_to_relocate));
            }
            else
            {
                reloc_survivor_helper (pval);
            }
        });
    }

    check_class_object_demotion (x);
}

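// Relocate the references of every object in the plug [plug, plug_end).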
void gc_heap::relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end)
{
    uint8_t*  x = plug;
    while (x < plug_end)
    {
        size_t s = size (x);
        uint8_t* next_obj = x + Align (s);
        Prefetch (next_obj);
        relocate_obj_helper (x, s);
        assert (s > 0);
        x = next_obj;
    }
}

// If we expanded, we are not handling it right now because we are not saving the new reloc info.
void gc_heap::verify_pins_with_post_plug_info (const char* msg)
{
    UNREFERENCED_PARAMETER(msg);
#if defined  (_DEBUG) && defined (VERIFY_HEAP)
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        if (!verify_pinned_queue_p)
            return;

        if (settings.heap_expansion)
            return;

        for (size_t i = 0; i < mark_stack_tos; i++)
        {
            mark& m = mark_stack_array[i];

            mark* pinned_plug_entry = pinned_plug_of(i);

            if (pinned_plug_entry->has_post_plug_info() &&
                pinned_plug_entry->post_short_p() &&
                (pinned_plug_entry->saved_post_plug_debug.gap != 1))
            {
                uint8_t* next_obj = pinned_plug_entry->get_post_plug_info_start() + sizeof (plug_and_gap);
                // object after pin
                dprintf (3, ("OFP: %Ix, G: %Ix, R: %Ix, LC: %d, RC: %d",
                    next_obj, node_gap_size (next_obj), node_relocation_distance (next_obj),
                    (int)node_left_child (next_obj), (int)node_right_child (next_obj)));

                size_t* post_plug_debug = (size_t*)(&m.saved_post_plug_debug);

                if (node_gap_size (next_obj) != *post_plug_debug)
                {
                    dprintf (3, ("obj: %Ix gap should be %Ix but it is %Ix",
                        next_obj, *post_plug_debug, (size_t)(node_gap_size (next_obj))));
                    FATAL_GC_ERROR();
                }
                post_plug_debug++;
                // can't do node_relocation_distance here as it clears the left bit.
                //if (node_relocation_distance (next_obj) != *post_plug_debug)
                if (*((size_t*)(next_obj - 3 * sizeof (size_t))) != *post_plug_debug)
                {
                    dprintf (3, ("obj: %Ix reloc should be %Ix but it is %Ix",
                        next_obj, *post_plug_debug, (size_t)(node_relocation_distance (next_obj))));
                    FATAL_GC_ERROR();
                }
                if (node_left_child (next_obj) > 0)
                {
                    dprintf (3, ("obj: %Ix, vLC: %d\n", next_obj, (int)(node_left_child (next_obj))));
                    FATAL_GC_ERROR();
                }
            }
        }

        dprintf (3, ("%s verified", msg));
    }
#endif // _DEBUG && VERIFY_HEAP
}

#ifdef COLLECTIBLE_CLASS
// We don't want to burn another ptr-sized slot in pinned plugs to record this, so just
// set the card unconditionally for collectible objects if we are demoting.
inline void
gc_heap::unconditional_set_card_collectible (uint8_t* obj)
{
    if (settings.demotion)
    {
        set_card (card_of (obj));
    }
}
#endif //COLLECTIBLE_CLASS

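// Relocate survivors in a plug whose last part was overwritten by the saved info of the
// adjacent pinned plug. If the overwritten object is too short to be walked normally, its
// reference slots are recovered via the bits saved in the pinned plug entry; otherwise the
// last object is handled by relocate_shortened_obj_helper and everything before it is
// relocated as usual.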
void gc_heap::relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry)
{
    uint8_t*  x = plug;
    uint8_t* p_plug = pinned_plug (pinned_plug_entry);
    BOOL is_pinned = (plug == p_plug);
    BOOL check_short_obj_p = (is_pinned ? pinned_plug_entry->post_short_p() : pinned_plug_entry->pre_short_p());

    plug_end += sizeof (gap_reloc_pair);

    //dprintf (3, ("%s %Ix is shortened, and last object %s overwritten", (is_pinned ? "PP" : "NP"), plug, (check_short_obj_p ? "is" : "is not")));
    dprintf (3, ("%s %Ix-%Ix short, LO: %s OW", (is_pinned ? "PP" : "NP"), plug, plug_end, (check_short_obj_p ? "is" : "is not")));

    verify_pins_with_post_plug_info("begin reloc short surv");

    while (x < plug_end)
    {
        if (check_short_obj_p && ((DWORD)(plug_end - x) < (DWORD)min_pre_pin_obj_size))
        {
            dprintf (3, ("last obj %Ix is short", x));

            if (is_pinned)
            {
#ifdef COLLECTIBLE_CLASS
                if (pinned_plug_entry->post_short_collectible_p())
                    unconditional_set_card_collectible (x);
#endif //COLLECTIBLE_CLASS

                // Relocate the saved references based on bits set.
                uint8_t** saved_plug_info_start = (uint8_t**)(pinned_plug_entry->get_post_plug_info_start());
                uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_post_plug_reloc_info());
                for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
                {
                    if (pinned_plug_entry->post_short_bit_p (i))
                    {
                        reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
                    }
                }
            }
            else
            {
#ifdef COLLECTIBLE_CLASS
                if (pinned_plug_entry->pre_short_collectible_p())
                    unconditional_set_card_collectible (x);
#endif //COLLECTIBLE_CLASS

                relocate_pre_plug_info (pinned_plug_entry);

                // Relocate the saved references based on bits set.
                uint8_t** saved_plug_info_start = (uint8_t**)(p_plug - sizeof (plug_and_gap));
                uint8_t** saved_info_to_relocate = (uint8_t**)(pinned_plug_entry->get_pre_plug_reloc_info());
                for (size_t i = 0; i < pinned_plug_entry->get_max_short_bits(); i++)
                {
                    if (pinned_plug_entry->pre_short_bit_p (i))
                    {
                        reloc_ref_in_shortened_obj ((saved_plug_info_start + i), (saved_info_to_relocate + i));
                    }
                }
            }

            break;
        }

        size_t s = size (x);
        uint8_t* next_obj = x + Align (s);
        Prefetch (next_obj);

        if (next_obj >= plug_end)
        {
            dprintf (3, ("object %Ix is at the end of the plug %Ix->%Ix",
                next_obj, plug, plug_end));

            verify_pins_with_post_plug_info("before reloc short obj");

            relocate_shortened_obj_helper (x, s, (x + Align (s) - sizeof (plug_and_gap)), pinned_plug_entry, is_pinned);
        }
        else
        {
            relocate_obj_helper (x, s);
        }

        assert (s > 0);
        x = next_obj;
    }

    verify_pins_with_post_plug_info("end reloc short surv");
}

void gc_heap::relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end,
                                          BOOL check_last_object_p,
                                          mark* pinned_plug_entry)
{
    //dprintf(3,("Relocating pointers in Plug [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));
    dprintf (3,("RP: [%Ix,%Ix[", (size_t)plug, (size_t)plug_end));

    if (check_last_object_p)
    {
        relocate_shortened_survivor_helper (plug, plug_end, pinned_plug_entry);
    }
    else
    {
        relocate_survivor_helper (plug, plug_end);
    }
}

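// In-order walk of the plug tree of a brick. A plug's end is only known once we see the gap in
// front of the next plug, so the previous plug (args->last_plug) is relocated when the current
// one is visited; the last plug of a segment is handled by the caller.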
void gc_heap::relocate_survivors_in_brick (uint8_t* tree, relocate_args* args)
{
    assert ((tree != NULL));

    dprintf (3, ("tree: %Ix, args->last_plug: %Ix, left: %Ix, right: %Ix, gap(t): %Ix",
        tree, args->last_plug,
        (tree + node_left_child (tree)),
        (tree + node_right_child (tree)),
        node_gap_size (tree)));

    if (node_left_child (tree))
    {
        relocate_survivors_in_brick (tree + node_left_child (tree), args);
    }
    {
        uint8_t*  plug = tree;
        BOOL   has_post_plug_info_p = FALSE;
        BOOL   has_pre_plug_info_p = FALSE;

        if (tree == oldest_pinned_plug)
        {
            args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
                                                               &has_post_plug_info_p);
            assert (tree == pinned_plug (args->pinned_plug_entry));

            dprintf (3, ("tree is the oldest pin: %Ix", tree));
        }
        if (args->last_plug)
        {
            size_t  gap_size = node_gap_size (tree);
            uint8_t*  gap = (plug - gap_size);
            dprintf (3, ("tree: %Ix, gap: %Ix (%Ix)", tree, gap, gap_size));
            assert (gap_size >= Align (min_obj_size));
            uint8_t*  last_plug_end = gap;

            BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);

            {
                relocate_survivors_in_plug (args->last_plug, last_plug_end, check_last_object_p, args->pinned_plug_entry);
            }
        }
        else
        {
            assert (!has_pre_plug_info_p);
        }

        args->last_plug = plug;
        args->is_shortened = has_post_plug_info_p;
        if (has_post_plug_info_p)
        {
            dprintf (3, ("setting %Ix as shortened", plug));
        }
        dprintf (3, ("last_plug: %Ix(shortened: %d)", plug, (args->is_shortened ? 1 : 0)));
    }
    if (node_right_child (tree))
    {
        relocate_survivors_in_brick (tree + node_right_child (tree), args);
    }
}

inline
void gc_heap::update_oldest_pinned_plug()
{
    oldest_pinned_plug = (pinned_plug_que_empty_p() ? 0 : pinned_plug (oldest_pin()));
}

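// Walk the brick table over the segments of the condemned generation, starting at
// first_condemned_address, and relocate the references in each surviving plug.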
void gc_heap::relocate_survivors (int condemned_gen_number,
                                  uint8_t* first_condemned_address)
{
    generation* condemned_gen = generation_of (condemned_gen_number);
    uint8_t*  start_address = first_condemned_address;
    size_t  current_brick = brick_of (start_address);
    heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));

    PREFIX_ASSUME(current_heap_segment != NULL);

    uint8_t*  end_address = 0;

    reset_pinned_queue_bos();
    update_oldest_pinned_plug();

    end_address = heap_segment_allocated (current_heap_segment);

    size_t  end_brick = brick_of (end_address - 1);
    relocate_args args;
    args.low = gc_low;
    args.high = gc_high;
    args.is_shortened = FALSE;
    args.pinned_plug_entry = 0;
    args.last_plug = 0;
    while (1)
    {
        if (current_brick > end_brick)
        {
            if (args.last_plug)
            {
                {
                    assert (!(args.is_shortened));
                    relocate_survivors_in_plug (args.last_plug,
                                                heap_segment_allocated (current_heap_segment),
                                                args.is_shortened,
                                                args.pinned_plug_entry);
                }

                args.last_plug = 0;
            }

            if (heap_segment_next_rw (current_heap_segment))
            {
                current_heap_segment = heap_segment_next_rw (current_heap_segment);
                current_brick = brick_of (heap_segment_mem (current_heap_segment));
                end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
                continue;
            }
            else
            {
                break;
            }
        }
        {
            int brick_entry =  brick_table [ current_brick ];

            if (brick_entry >= 0)
            {
                relocate_survivors_in_brick (brick_address (current_brick) +
                                             brick_entry -1,
                                             &args);
            }
        }
        current_brick++;
    }
}

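// Report one plug to the profiler callback. If part of the plug was overwritten by saved
// pinned plug info, temporarily swap the saved data back in so the callback sees the original
// object contents, then swap it back out afterwards.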
void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args)
{
    if (check_last_object_p)
    {
        size += sizeof (gap_reloc_pair);
        mark* entry = args->pinned_plug_entry;

        if (args->is_shortened)
        {
            assert (entry->has_post_plug_info());
            entry->swap_post_plug_and_saved_for_profiler();
        }
        else
        {
            assert (entry->has_pre_plug_info());
            entry->swap_pre_plug_and_saved_for_profiler();
        }
    }

    ptrdiff_t last_plug_relocation = node_relocation_distance (plug);
    STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation);
    ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0;

    (args->fn) (plug, (plug + size), reloc, args->profiling_context, !!settings.compaction, false);

    if (check_last_object_p)
    {
        mark* entry = args->pinned_plug_entry;

        if (args->is_shortened)
        {
            entry->swap_post_plug_and_saved_for_profiler();
        }
        else
        {
            entry->swap_pre_plug_and_saved_for_profiler();
        }
    }
}

void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args)
{
    assert ((tree != NULL));
    if (node_left_child (tree))
    {
        walk_relocation_in_brick (tree + node_left_child (tree), args);
    }

    uint8_t*  plug = tree;
    BOOL   has_pre_plug_info_p = FALSE;
    BOOL   has_post_plug_info_p = FALSE;

    if (tree == oldest_pinned_plug)
    {
        args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
                                                           &has_post_plug_info_p);
        assert (tree == pinned_plug (args->pinned_plug_entry));
    }

    if (args->last_plug != 0)
    {
        size_t gap_size = node_gap_size (tree);
        uint8_t*  gap = (plug - gap_size);
        uint8_t*  last_plug_end = gap;
        size_t last_plug_size = (last_plug_end - args->last_plug);
        dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix",
            tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));

        BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
        if (!check_last_object_p)
        {
            assert (last_plug_size >= Align (min_obj_size));
        }

        walk_plug (args->last_plug, last_plug_size, check_last_object_p, args);
    }
    else
    {
        assert (!has_pre_plug_info_p);
    }

    dprintf (3, ("set args last plug to plug: %Ix", plug));
    args->last_plug = plug;
    args->is_shortened = has_post_plug_info_p;

    if (node_right_child (tree))
    {
        walk_relocation_in_brick (tree + node_right_child (tree), args);
    }
}

void gc_heap::walk_relocation (void* profiling_context, record_surv_fn fn)
{
    generation* condemned_gen = generation_of (settings.condemned_generation);
    uint8_t*  start_address = generation_allocation_start (condemned_gen);
    size_t  current_brick = brick_of (start_address);
    heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));

    PREFIX_ASSUME(current_heap_segment != NULL);

    reset_pinned_queue_bos();
    update_oldest_pinned_plug();
    size_t end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
    walk_relocate_args args;
    args.is_shortened = FALSE;
    args.pinned_plug_entry = 0;
    args.last_plug = 0;
    args.profiling_context = profiling_context;
    args.fn = fn;

    while (1)
    {
        if (current_brick > end_brick)
        {
            if (args.last_plug)
            {
                walk_plug (args.last_plug,
                           (heap_segment_allocated (current_heap_segment) - args.last_plug),
                           args.is_shortened,
                           &args);
                args.last_plug = 0;
            }
            if (heap_segment_next_rw (current_heap_segment))
            {
                current_heap_segment = heap_segment_next_rw (current_heap_segment);
                current_brick = brick_of (heap_segment_mem (current_heap_segment));
                end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
                continue;
            }
            else
            {
                break;
            }
        }
        {
            int brick_entry =  brick_table [ current_brick ];
            if (brick_entry >= 0)
            {
                walk_relocation_in_brick (brick_address (current_brick) +
                                          brick_entry - 1,
                                          &args);
            }
        }
        current_brick++;
    }
}

void gc_heap::walk_survivors (record_surv_fn fn, void* context, walk_surv_type type)
{
    if (type == walk_for_gc)
        walk_survivors_relocation (context, fn);
#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
    else if (type == walk_for_bgc)
        walk_survivors_for_bgc (context, fn);
#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE
    else if (type == walk_for_loh)
        walk_survivors_for_loh (context, fn);
    else
        assert (!"unknown type!");
}

#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)
void gc_heap::walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn)
{
    // This should only be called for BGCs
    assert(settings.concurrent);

    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));

    BOOL small_object_segments = TRUE;
    int align_const = get_alignment_constant (small_object_segments);

    while (1)
    {
        if (seg == 0)
        {
            if (small_object_segments)
            {
                //switch to large segment
                small_object_segments = FALSE;

                align_const = get_alignment_constant (small_object_segments);
                seg = heap_segment_rw (generation_start_segment (large_object_generation));

                PREFIX_ASSUME(seg != NULL);

                continue;
            }
            else
                break;
        }

        uint8_t* o = heap_segment_mem (seg);
        uint8_t* end = heap_segment_allocated (seg);

        while (o < end)
        {
            if (method_table(o) == g_gc_pFreeObjectMethodTable)
            {
                o += Align (size (o), align_const);
                continue;
            }

            // It's survived. Make a fake plug, starting at o,
            // and send the event

            uint8_t* plug_start = o;

            while (method_table(o) != g_gc_pFreeObjectMethodTable)
            {
                o += Align (size (o), align_const);
                if (o >= end)
                {
                    break;
                }
            }

            uint8_t* plug_end = o;

            fn (plug_start,
                plug_end,
                0,              // Reloc distance == 0 as this is non-compacting
                profiling_context,
                false,          // Non-compacting
                true);          // BGC
        }

        seg = heap_segment_next (seg);
    }
}
#endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE)

void gc_heap::relocate_phase (int condemned_gen_number,
                              uint8_t* first_condemned_address)
{
    ScanContext sc;
    sc.thread_number = heap_number;
    sc.promotion = FALSE;
    sc.concurrent = FALSE;


#ifdef TIME_GC
        unsigned start;
        unsigned finish;
        start = GetCycleCount32();
#endif //TIME_GC

//  %type%  category = quote (relocate);
    dprintf (2,("---- Relocate phase -----"));

#ifdef MULTIPLE_HEAPS
    //join all threads to make sure they are synchronized
    dprintf(3, ("Joining after end of plan"));
    gc_t_join.join(this, gc_join_begin_relocate_phase);
    if (gc_t_join.joined())
#endif //MULTIPLE_HEAPS

    {
#ifdef MULTIPLE_HEAPS

        //join all threads to make sure they are synchronized
        dprintf(3, ("Restarting for relocation"));
        gc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    dprintf(3,("Relocating roots"));
    GCScan::GcScanRoots(GCHeap::Relocate,
                            condemned_gen_number, max_generation, &sc);

    verify_pins_with_post_plug_info("after reloc stack");

#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        scan_background_roots (GCHeap::Relocate, heap_number, &sc);
    }
#endif //BACKGROUND_GC

#ifdef FEATURE_CARD_MARKING_STEALING
    // for card marking stealing, do the other relocations *before* we scan the older generations
    // this gives us a chance to make up for imbalance in these phases later
    {
        dprintf(3, ("Relocating survivors"));
        relocate_survivors(condemned_gen_number,
            first_condemned_address);
    }

#ifdef FEATURE_PREMORTEM_FINALIZATION
    dprintf(3, ("Relocating finalization data"));
    finalize_queue->RelocateFinalizationData(condemned_gen_number,
        __this);
#endif // FEATURE_PREMORTEM_FINALIZATION


    {
        dprintf(3, ("Relocating handle table"));
        GCScan::GcScanHandles(GCHeap::Relocate,
            condemned_gen_number, max_generation, &sc);
    }
#endif // FEATURE_CARD_MARKING_STEALING

    if (condemned_gen_number != max_generation)
    {
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
        if (!card_mark_done_soh)
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
        {
            dprintf (3, ("Relocating cross generation pointers on heap %d", heap_number));
            mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG);
            verify_pins_with_post_plug_info("after reloc cards");
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
            card_mark_done_soh = true;
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
        }
    }
    if (condemned_gen_number != max_generation)
    {
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
        if (!card_mark_done_loh)
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
        {
            dprintf (3, ("Relocating cross generation pointers for large objects on heap %d", heap_number));
            mark_through_cards_for_large_objects(&gc_heap::relocate_address, TRUE THIS_ARG);
#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
            card_mark_done_loh = true;
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING
        }
    }
    else
    {
#ifdef FEATURE_LOH_COMPACTION
        if (loh_compacted_p)
        {
            assert (settings.condemned_generation == max_generation);
            relocate_in_loh_compact();
        }
        else
#endif //FEATURE_LOH_COMPACTION
        {
            relocate_in_large_objects ();
        }
    }
#ifndef FEATURE_CARD_MARKING_STEALING
    // moved this code *before* we scan the older generations via mark_through_cards_xxx
    // this gives us a chance to have mark_through_cards_xxx make up for imbalance in the other relocations
    {
        dprintf(3,("Relocating survivors"));
        relocate_survivors (condemned_gen_number,
                            first_condemned_address);
    }

#ifdef FEATURE_PREMORTEM_FINALIZATION
        dprintf(3,("Relocating finalization data"));
        finalize_queue->RelocateFinalizationData (condemned_gen_number,
                                                       __this);
#endif // FEATURE_PREMORTEM_FINALIZATION


// MTHTS
    {
        dprintf(3,("Relocating handle table"));
        GCScan::GcScanHandles(GCHeap::Relocate,
                                  condemned_gen_number, max_generation, &sc);
    }
#endif // !FEATURE_CARD_MARKING_STEALING


#if defined(MULTIPLE_HEAPS) && defined(FEATURE_CARD_MARKING_STEALING)
    if (condemned_gen_number != max_generation)
    {
        // check the other heaps cyclically and try to help out where the relocation isn't done
        for (int i = 0; i < gc_heap::n_heaps; i++)
        {
            int heap_number_to_look_at = (i + heap_number) % gc_heap::n_heaps;
            gc_heap* hp = gc_heap::g_heaps[heap_number_to_look_at];
            if (!hp->card_mark_done_soh)
            {
                dprintf(3, ("Relocating cross generation pointers on heap %d", hp->heap_number));
                hp->mark_through_cards_for_segments(&gc_heap::relocate_address, TRUE THIS_ARG);
                hp->card_mark_done_soh = true;
            }

            if (!hp->card_mark_done_loh)
            {
                dprintf(3, ("Relocating cross generation pointers for large objects on heap %d", hp->heap_number));
                hp->mark_through_cards_for_large_objects(&gc_heap::relocate_address, TRUE THIS_ARG);
                hp->card_mark_done_loh = true;
            }
        }
    }
#endif // MULTIPLE_HEAPS && FEATURE_CARD_MARKING_STEALING

#ifdef MULTIPLE_HEAPS
    //join all threads to make sure they are synchronized
    dprintf(3, ("Joining after end of relocation"));
    gc_t_join.join(this, gc_join_relocate_phase_done);

#endif //MULTIPLE_HEAPS

#ifdef TIME_GC
        finish = GetCycleCount32();
        reloc_time = finish - start;
#endif //TIME_GC

    dprintf(2,( "---- End of Relocate phase ----"));
}

// This checks whether tree is the current (oldest) pinned plug; if so it returns the info
// for this pinned plug and, when deque_p is TRUE, dequeues it from the pinned queue.
//
// We don't change the values of the plug info if tree is not the same as
// the current pinned plug - the caller is responsible for setting the right
// values to begin with.
//
// POPO TODO: We are keeping this temporarily as it is also used by realloc,
// which passes FALSE for deque_p; change it to use the same optimization
// as relocate. This is less essential since realloc is already a slow path.
mark* gc_heap::get_next_pinned_entry (uint8_t* tree,
                                      BOOL* has_pre_plug_info_p,
                                      BOOL* has_post_plug_info_p,
                                      BOOL deque_p)
{
    if (!pinned_plug_que_empty_p())
    {
        mark* oldest_entry = oldest_pin();
        uint8_t* oldest_plug = pinned_plug (oldest_entry);
        if (tree == oldest_plug)
        {
            *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
            *has_post_plug_info_p = oldest_entry->has_post_plug_info();

            if (deque_p)
            {
                deque_pinned_plug();
            }

            dprintf (3, ("found a pinned plug %Ix, pre: %d, post: %d",
                tree,
                (*has_pre_plug_info_p ? 1 : 0),
                (*has_post_plug_info_p ? 1 : 0)));

            return oldest_entry;
        }
    }

    return NULL;
}

// This also dequeues the oldest entry and updates the oldest pinned plug.
mark* gc_heap::get_oldest_pinned_entry (BOOL* has_pre_plug_info_p,
                                        BOOL* has_post_plug_info_p)
{
    mark* oldest_entry = oldest_pin();
    *has_pre_plug_info_p =  oldest_entry->has_pre_plug_info();
    *has_post_plug_info_p = oldest_entry->has_post_plug_info();

    deque_pinned_plug();
    update_oldest_pinned_plug();
    return oldest_entry;
}

inline
void gc_heap::copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
{
    if (copy_cards_p)
        copy_cards_for_addresses (dest, src, len);
    else
        clear_card_for_addresses (dest, dest + len);
}

// POPO TODO: We should actually just recover the artificially made gaps here, because when we copy
// we always copy the earlier plugs first, which means we won't need the gap sizes anymore. This way
// we won't need to individually recover each overwritten part of plugs.
inline
void  gc_heap::gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p)
{
    if (dest != src)
    {
#ifdef BACKGROUND_GC
        if (current_c_gc_state == c_gc_state_marking)
        {
            // TODO: consider whether we should change this
            // to copy a consecutive region of the mark array instead.
            copy_mark_bits_for_addresses (dest, src, len);
        }
#endif //BACKGROUND_GC
        //dprintf(3,(" Memcopy [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
        dprintf(3,(" mc: [%Ix->%Ix, %Ix->%Ix[", (size_t)src, (size_t)dest, (size_t)src+len, (size_t)dest+len));
        memcopy (dest - plug_skew, src - plug_skew, len);
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (SoftwareWriteWatch::IsEnabledForGCHeap())
        {
            // The ranges [src - plug_skew .. src[ and [src + len - plug_skew .. src + len[ are ObjHeaders, which don't have GC
            // references, and are not relevant for write watch. The latter range actually corresponds to the ObjHeader for the
            // object at (src + len), so it can be ignored anyway.
            SoftwareWriteWatch::SetDirtyRegion(dest, len - plug_skew);
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        copy_cards_range (dest, src, len, copy_cards_p);
    }
}

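// Copy a plug to its relocated location: temporarily restore any overwritten tail from the
// pinned plug entry, turn padding/realignment bytes into free objects, copy the memory and its
// cards (or clear the destination cards), account the size to the destination generation when
// compacting into a reused segment, and fix up the brick entries that the relocated plug spans.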
void gc_heap::compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args)
{
    args->print();
    uint8_t* reloc_plug = plug + args->last_plug_relocation;

    if (check_last_object_p)
    {
        size += sizeof (gap_reloc_pair);
        mark* entry = args->pinned_plug_entry;

        if (args->is_shortened)
        {
            assert (entry->has_post_plug_info());
            entry->swap_post_plug_and_saved();
        }
        else
        {
            assert (entry->has_pre_plug_info());
            entry->swap_pre_plug_and_saved();
        }
    }

    int  old_brick_entry =  brick_table [brick_of (plug)];

    assert (node_relocation_distance (plug) == args->last_plug_relocation);

#ifdef FEATURE_STRUCTALIGN
    ptrdiff_t alignpad = node_alignpad(plug);
    if (alignpad)
    {
        make_unused_array (reloc_plug - alignpad, alignpad);
        if (brick_of (reloc_plug - alignpad) != brick_of (reloc_plug))
        {
            // The alignment padding is straddling one or more bricks;
            // it has to be the last "object" of its first brick.
            fix_brick_to_highest (reloc_plug - alignpad, reloc_plug);
        }
    }
#else // FEATURE_STRUCTALIGN
    size_t unused_arr_size = 0;
    BOOL  already_padded_p = FALSE;
#ifdef SHORT_PLUGS
    if (is_plug_padded (plug))
    {
        already_padded_p = TRUE;
        clear_plug_padded (plug);
        unused_arr_size = Align (min_obj_size);
    }
#endif //SHORT_PLUGS
    if (node_realigned (plug))
    {
        unused_arr_size += switch_alignment_size (already_padded_p);
    }

    if (unused_arr_size != 0)
    {
        make_unused_array (reloc_plug - unused_arr_size, unused_arr_size);

        if (brick_of (reloc_plug - unused_arr_size) != brick_of (reloc_plug))
        {
            dprintf (3, ("fix B for padding: %Id: %Ix->%Ix",
                unused_arr_size, (reloc_plug - unused_arr_size), reloc_plug));
            // The alignment padding is straddling one or more bricks;
            // it has to be the last "object" of its first brick.
            fix_brick_to_highest (reloc_plug - unused_arr_size, reloc_plug);
        }
    }
#endif // FEATURE_STRUCTALIGN

#ifdef SHORT_PLUGS
    if (is_plug_padded (plug))
    {
        make_unused_array (reloc_plug - Align (min_obj_size), Align (min_obj_size));

        if (brick_of (reloc_plug - Align (min_obj_size)) != brick_of (reloc_plug))
        {
            // The alignment padding is straddling one or more bricks;
            // it has to be the last "object" of its first brick.
            fix_brick_to_highest (reloc_plug - Align (min_obj_size), reloc_plug);
        }
    }
#endif //SHORT_PLUGS

    gcmemcopy (reloc_plug, plug, size, args->copy_cards_p);

    if (args->check_gennum_p)
    {
        int src_gennum = args->src_gennum;
        if (src_gennum == -1)
        {
            src_gennum = object_gennum (plug);
        }

        int dest_gennum = object_gennum_plan (reloc_plug);

        if (src_gennum < dest_gennum)
        {
            generation_allocation_size (generation_of (dest_gennum)) += size;
        }
    }

    size_t current_reloc_brick = args->current_compacted_brick;

    if (brick_of (reloc_plug) != current_reloc_brick)
    {
        dprintf (3, ("last reloc B: %Ix, current reloc B: %Ix",
            current_reloc_brick, brick_of (reloc_plug)));

        if (args->before_last_plug)
        {
            dprintf (3,(" fixing last brick %Ix to point to last plug %Ix(%Ix)",
                     current_reloc_brick,
                     args->before_last_plug,
                     (args->before_last_plug - brick_address (current_reloc_brick))));

            {
                set_brick (current_reloc_brick,
                        args->before_last_plug - brick_address (current_reloc_brick));
            }
        }
        current_reloc_brick = brick_of (reloc_plug);
    }
    size_t end_brick = brick_of (reloc_plug + size-1);
    if (end_brick != current_reloc_brick)
    {
        // The plug is straddling one or more bricks
        // It has to be the last plug of its first brick
        dprintf (3,("plug spanning multiple bricks, fixing first brick %Ix to %Ix(%Ix)",
                 current_reloc_brick, (size_t)reloc_plug,
                 (reloc_plug - brick_address (current_reloc_brick))));

        {
            set_brick (current_reloc_brick,
                    reloc_plug - brick_address (current_reloc_brick));
        }
        // update all intervening bricks
        size_t brick = current_reloc_brick + 1;
        dprintf (3,("setting intervening bricks %Ix->%Ix to -1",
            brick, (end_brick - 1)));
        while (brick < end_brick)
        {
            set_brick (brick, -1);
            brick++;
        }
        // encode the last brick offset as a plug address
        args->before_last_plug = brick_address (end_brick) -1;
        current_reloc_brick = end_brick;
        dprintf (3, ("setting before last to %Ix, last brick to %Ix",
            args->before_last_plug, current_reloc_brick));
    }
    else
    {
        dprintf (3, ("still in the same brick: %Ix", end_brick));
        args->before_last_plug = reloc_plug;
    }
    args->current_compacted_brick = current_reloc_brick;

    if (check_last_object_p)
    {
        mark* entry = args->pinned_plug_entry;

        if (args->is_shortened)
        {
            entry->swap_post_plug_and_saved();
        }
        else
        {
            entry->swap_pre_plug_and_saved();
        }
    }
}

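// In-order walk of the plug tree of a brick for compaction; the previous plug (args->last_plug)
// is copied once the gap in front of the current plug tells us where it ends.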
void gc_heap::compact_in_brick (uint8_t* tree, compact_args* args)
{
    assert (tree != NULL);
    int   left_node = node_left_child (tree);
    int   right_node = node_right_child (tree);
    ptrdiff_t relocation = node_relocation_distance (tree);

    args->print();

    if (left_node)
    {
        dprintf (3, ("B: L: %d->%Ix", left_node, (tree + left_node)));
        compact_in_brick ((tree + left_node), args);
    }

    uint8_t*  plug = tree;
    BOOL   has_pre_plug_info_p = FALSE;
    BOOL   has_post_plug_info_p = FALSE;

    if (tree == oldest_pinned_plug)
    {
        args->pinned_plug_entry = get_oldest_pinned_entry (&has_pre_plug_info_p,
                                                           &has_post_plug_info_p);
        assert (tree == pinned_plug (args->pinned_plug_entry));
    }

    if (args->last_plug != 0)
    {
        size_t gap_size = node_gap_size (tree);
        uint8_t*  gap = (plug - gap_size);
        uint8_t*  last_plug_end = gap;
        size_t last_plug_size = (last_plug_end - args->last_plug);
        dprintf (3, ("tree: %Ix, last_plug: %Ix, gap: %Ix(%Ix), last_plug_end: %Ix, size: %Ix",
            tree, args->last_plug, gap, gap_size, last_plug_end, last_plug_size));

        BOOL check_last_object_p = (args->is_shortened || has_pre_plug_info_p);
        if (!check_last_object_p)
        {
            assert (last_plug_size >= Align (min_obj_size));
        }

        compact_plug (args->last_plug, last_plug_size, check_last_object_p, args);
    }
    else
    {
        assert (!has_pre_plug_info_p);
    }

    dprintf (3, ("set args last plug to plug: %Ix, reloc: %Ix", plug, relocation));
    args->last_plug = plug;
    args->last_plug_relocation = relocation;
    args->is_shortened = has_post_plug_info_p;

    if (right_node)
    {
        dprintf (3, ("B: R: %d->%Ix", right_node, (tree + right_node)));
        compact_in_brick ((tree + right_node), args);
    }
}

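// Walk the whole pinned plug queue and restore the object data that was overwritten by each
// entry's saved pre/post plug info.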
void gc_heap::recover_saved_pinned_info()
{
    reset_pinned_queue_bos();

    while (!(pinned_plug_que_empty_p()))
    {
        mark* oldest_entry = oldest_pin();
        oldest_entry->recover_plug_info();
#ifdef GC_CONFIG_DRIVEN
        if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info())
            record_interesting_data_point (idp_pre_and_post_pin);
        else if (oldest_entry->has_pre_plug_info())
            record_interesting_data_point (idp_pre_pin);
        else if (oldest_entry->has_post_plug_info())
            record_interesting_data_point (idp_post_pin);
#endif //GC_CONFIG_DRIVEN

        deque_pinned_plug();
    }
}

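// Compact phase: walk the brick table over the condemned segments and copy every surviving
// plug to the location chosen during the plan phase, then recover the data saved for pins.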
void gc_heap::compact_phase (int condemned_gen_number,
                             uint8_t*  first_condemned_address,
                             BOOL clear_cards)
{
//  %type%  category = quote (compact);
#ifdef TIME_GC
        unsigned start;
        unsigned finish;
        start = GetCycleCount32();
#endif //TIME_GC
    generation*   condemned_gen = generation_of (condemned_gen_number);
    uint8_t*  start_address = first_condemned_address;
    size_t   current_brick = brick_of (start_address);
    heap_segment*  current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen));

    PREFIX_ASSUME(current_heap_segment != NULL);

    reset_pinned_queue_bos();
    update_oldest_pinned_plug();

    BOOL reused_seg = expand_reused_seg_p();
    if (reused_seg)
    {
        for (int i = 1; i <= max_generation; i++)
        {
            generation_allocation_size (generation_of (i)) = 0;
        }
    }

    uint8_t*  end_address = heap_segment_allocated (current_heap_segment);

    size_t  end_brick = brick_of (end_address-1);
    compact_args args;
    args.last_plug = 0;
    args.before_last_plug = 0;
    args.current_compacted_brick = ~((size_t)1);
    args.is_shortened = FALSE;
    args.pinned_plug_entry = 0;
    args.copy_cards_p =  (condemned_gen_number >= 1) || !clear_cards;
    args.check_gennum_p = reused_seg;
    if (args.check_gennum_p)
    {
        args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
    }

    dprintf (2,("---- Compact Phase: %Ix(%Ix)----",
        first_condemned_address, brick_of (first_condemned_address)));

#ifdef MULTIPLE_HEAPS
    //restart
    if (gc_t_join.joined())
    {
#endif //MULTIPLE_HEAPS

#ifdef MULTIPLE_HEAPS
        dprintf(3, ("Restarting for compaction"));
        gc_t_join.restart();
    }
#endif //MULTIPLE_HEAPS

    reset_pinned_queue_bos();

#ifdef FEATURE_LOH_COMPACTION
    if (loh_compacted_p)
    {
        compact_loh();
    }
#endif //FEATURE_LOH_COMPACTION

    if ((start_address < end_address) ||
        (condemned_gen_number == max_generation))
    {
        while (1)
        {
            if (current_brick > end_brick)
            {
                if (args.last_plug != 0)
                {
                    dprintf (3, ("compacting last plug: %Ix", args.last_plug));
                    compact_plug (args.last_plug,
                                  (heap_segment_allocated (current_heap_segment) - args.last_plug),
                                  args.is_shortened,
                                  &args);
                }

                if (heap_segment_next_rw (current_heap_segment))
                {
                    current_heap_segment = heap_segment_next_rw (current_heap_segment);
                    current_brick = brick_of (heap_segment_mem (current_heap_segment));
                    end_brick = brick_of (heap_segment_allocated (current_heap_segment)-1);
                    args.last_plug = 0;
                    if (args.check_gennum_p)
                    {
                        args.src_gennum = ((current_heap_segment == ephemeral_heap_segment) ? -1 : 2);
                    }
                    continue;
                }
                else
                {
                    if (args.before_last_plug !=0)
                    {
                        dprintf (3, ("Fixing last brick %Ix to point to plug %Ix",
                                    args.current_compacted_brick, (size_t)args.before_last_plug));
                        assert (args.current_compacted_brick != ~((size_t)1));
                        set_brick (args.current_compacted_brick,
                                   args.before_last_plug - brick_address (args.current_compacted_brick));
                    }
                    break;
                }
            }
            {
                int  brick_entry =  brick_table [ current_brick ];
                dprintf (3, ("B: %Ix(%Ix)->%Ix",
                    current_brick, (size_t)brick_entry, (brick_address (current_brick) + brick_entry - 1)));

                if (brick_entry >= 0)
                {
                    compact_in_brick ((brick_address (current_brick) + brick_entry -1),
                                      &args);

                }
            }
            current_brick++;
        }
    }

    recover_saved_pinned_info();

#ifdef TIME_GC
    finish = GetCycleCount32();
    compact_time = finish - start;
#endif //TIME_GC

    concurrent_print_time_delta ("compact end");

    dprintf(2,("---- End of Compact phase ----"));
}

#ifdef MULTIPLE_HEAPS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
void gc_heap::gc_thread_stub (void* arg)
{
    gc_heap* heap = (gc_heap*)arg;
    if (!gc_thread_no_affinitize_p)
    {
        // We are about to set affinity for GC threads. It is a good place to set up NUMA and
        // CPU groups because the process mask, processor number, and group number are all
        // readily available.
        set_thread_affinity_for_heap (heap->heap_number, heap_select::find_proc_no_from_heap_no (heap->heap_number));
    }

    // server GC threads run at a higher priority than normal.
    GCToOSInterface::BoostThreadPriority();
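    // Stagger the stack start of each server GC thread by heap number (presumably to reduce
    // cache/page aliasing between the otherwise identically laid out GC thread stacks).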
    _alloca (256*heap->heap_number);
    heap->gc_thread_function();
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif //_MSC_VER

#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return
#endif //_MSC_VER
void gc_heap::bgc_thread_stub (void* arg)
{
    gc_heap* heap = (gc_heap*)arg;
    heap->bgc_thread = GCToEEInterface::GetThread();
    assert(heap->bgc_thread != nullptr);
    heap->bgc_thread_function();
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif //_MSC_VER

#endif //BACKGROUND_GC

/*------------------ Background GC ----------------------------*/

#ifdef BACKGROUND_GC

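// Mark every object currently queued on the background mark list (c_mark_list), draining the
// list from the end.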
void gc_heap::background_drain_mark_list (int thread)
{
    UNREFERENCED_PARAMETER(thread);

    size_t saved_c_mark_list_index = c_mark_list_index;

    if (saved_c_mark_list_index)
    {
        concurrent_print_time_delta ("SML");
    }
    while (c_mark_list_index != 0)
    {
        size_t current_index = c_mark_list_index - 1;
        uint8_t* o = c_mark_list [current_index];
        background_mark_object (o THREAD_NUMBER_ARG);
        c_mark_list_index--;
    }
    if (saved_c_mark_list_index)
    {

        concurrent_print_time_delta ("EML");
    }

    fire_drain_mark_list_event (saved_c_mark_list_index);
}


// The background GC version of scan_dependent_handles (see that method for a more in-depth comment).
#ifdef MULTIPLE_HEAPS
// Since we only scan dependent handles while we are stopped we'll never interfere with FGCs scanning
// them. So we can use the same static variables.
void gc_heap::background_scan_dependent_handles (ScanContext *sc)
{
    // Whenever we call this method there may have been preceding object promotions. So set
    // s_fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
    // based on how the scanning proceeded).
    s_fUnscannedPromotions = TRUE;

    // We don't know how many times we need to loop yet. In particular we can't base the loop condition on
    // the state of this thread's portion of the dependent handle table. That's because promotions on other
    // threads could cause handle promotions to become necessary here. Even if there are definitely no more
    // promotions possible in this thread's handles, we still have to stay in lock-step with those worker
    // threads that haven't finished yet (each GC worker thread has to join exactly the same number of times
    // as all the others or they'll get out of step).
    while (true)
    {
        // The various worker threads are all currently racing in this code. We need to work out if at least
        // one of them think they have work to do this cycle. Each thread needs to rescan its portion of the
        // dependent handle table when both of the following conditions apply:
        //  1) At least one (arbitrary) object might have been promoted since the last scan (because if this
        //     object happens to correspond to a primary in one of our handles we might potentially have to
        //     promote the associated secondary).
        //  2) The table for this thread has at least one handle with a secondary that isn't promoted yet.
        //
        // The first condition is represented by s_fUnscannedPromotions. This is always non-zero for the first
        // iteration of this loop (see comment above) and in subsequent cycles each thread updates this
        // whenever a mark stack overflow occurs or scanning their dependent handles results in a secondary
        // being promoted. This value is cleared back to zero in a synchronized fashion in the join that
        // follows below. Note that we can't read this outside of the join since on any iteration apart from
        // the first threads will be racing between reading this value and completing their previous
        // iteration's table scan.
        //
        // The second condition is tracked by the dependent handle code itself on a per worker thread basis
        // (and updated by the GcDhReScan() method). We call GcDhUnpromotedHandlesExist() on each thread to
        // determine the local value and collect the results into the s_fUnpromotedHandles variable in what is
        // effectively an OR operation. As per s_fUnscannedPromotions we can't read the final result until
        // we're safely joined.
        if (GCScan::GcDhUnpromotedHandlesExist(sc))
            s_fUnpromotedHandles = TRUE;

        // Synchronize all the threads so we can read our state variables safely. The following shared
        // variable (indicating whether we should scan the tables or terminate the loop) will be set by a
        // single thread inside the join.
        bgc_t_join.join(this, gc_join_scan_dependent_handles);
        if (bgc_t_join.joined())
        {
            // We're synchronized so it's safe to read our shared state variables. We update another shared
            // variable to indicate to all threads whether we'll be scanning for another cycle or terminating
            // the loop. We scan if there has been at least one object promotion since last time and at least
            // one thread has a dependent handle table with a potential handle promotion possible.
            s_fScanRequired = s_fUnscannedPromotions && s_fUnpromotedHandles;

            // Reset our shared state variables (ready to be set again on this scan or with a good initial
            // value for the next call if we're terminating the loop).
            s_fUnscannedPromotions = FALSE;
            s_fUnpromotedHandles = FALSE;

            if (!s_fScanRequired)
            {
                uint8_t* all_heaps_max = 0;
                uint8_t* all_heaps_min = MAX_PTR;
                int i;
                for (i = 0; i < n_heaps; i++)
                {
                    if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
                        all_heaps_max = g_heaps[i]->background_max_overflow_address;
                    if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
                        all_heaps_min = g_heaps[i]->background_min_overflow_address;
                }
                for (i = 0; i < n_heaps; i++)
                {
                    g_heaps[i]->background_max_overflow_address = all_heaps_max;
                    g_heaps[i]->background_min_overflow_address = all_heaps_min;
                }
            }

            // Restart all the workers.
            dprintf(2, ("Starting all gc thread mark stack overflow processing"));
            bgc_t_join.restart();
        }

        // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
        // being visible. If there really was an overflow (process_mark_overflow returns true) then set the
        // global flag indicating that at least one object promotion may have occurred (the usual comment
        // about races applies). (Note it's OK to set this flag even if we're about to terminate the loop and
        // exit the method since we unconditionally set this variable on method entry anyway).
        if (background_process_mark_overflow (sc->concurrent))
            s_fUnscannedPromotions = TRUE;

        // If we decided that no scan was required we can terminate the loop now.
        if (!s_fScanRequired)
            break;

        // Otherwise we must join with the other workers to ensure that all mark stack overflows have been
        // processed before we start scanning dependent handle tables (if overflows remain while we scan we
        // could miss noting the promotion of some primary objects).
        bgc_t_join.join(this, gc_join_rescan_dependent_handles);
        if (bgc_t_join.joined())
        {
            // Restart all the workers.
            dprintf(3, ("Starting all gc thread for dependent handle promotion"));
            bgc_t_join.restart();
        }

        // If the portion of the dependent handle table managed by this worker has handles that could still be
        // promoted perform a rescan. If the rescan resulted in at least one promotion note this fact since it
        // could require a rescan of handles on this or other workers.
        if (GCScan::GcDhUnpromotedHandlesExist(sc))
            if (GCScan::GcDhReScan(sc))
                s_fUnscannedPromotions = TRUE;
    }
}
#else
void gc_heap::background_scan_dependent_handles (ScanContext *sc)
{
    // Whenever we call this method there may have been preceding object promotions. So set
    // fUnscannedPromotions unconditionally (during further iterations of the scanning loop this will be set
    // based on how the scanning proceeded).
    bool fUnscannedPromotions = true;

    // Scan dependent handles repeatedly until there are no further promotions that can be made or we made a
    // scan without performing any new promotions.
    while (GCScan::GcDhUnpromotedHandlesExist(sc) && fUnscannedPromotions)
    {
        // On each iteration of the loop start with the assumption that no further objects have been promoted.
        fUnscannedPromotions = false;

        // Handle any mark stack overflow: scanning dependent handles relies on all previous object promotions
        // being visible. If there was an overflow (background_process_mark_overflow returned true) then
        // additional objects now appear to be promoted and we should set the flag.
        if (background_process_mark_overflow (sc->concurrent))
            fUnscannedPromotions = true;

        // Perform the scan and set the flag if any promotions resulted.
        if (GCScan::GcDhReScan (sc))
            fUnscannedPromotions = true;
    }

    // Perform a last processing of any overflowed mark stack.
    background_process_mark_overflow (sc->concurrent);
}
#endif //MULTIPLE_HEAPS
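
// Illustration only (not compiled into the GC): a self-contained model of why
// the dependent handle scan above has to iterate to a fixed point. Dependent
// handles behave like (primary, secondary) pairs - a secondary is kept alive
// only if its primary is alive - and promoting one pair's secondary can make
// another pair's primary reachable. The names below (dh_pair, reachable,
// promote_to_fixed_point) are hypothetical and exist only for this sketch; the
// real scan uses GcDhUnpromotedHandlesExist/GcDhReScan plus mark stack
// overflow processing.
#if 0
struct dh_pair
{
    size_t primary;     // index of the primary object
    size_t secondary;   // index of the secondary object
};

static void promote_to_fixed_point (dh_pair* table, size_t count, bool* reachable)
{
    bool promoted_something = true;
    while (promoted_something)
    {
        promoted_something = false;
        for (size_t i = 0; i < count; i++)
        {
            // a secondary is live exactly when its primary is live
            if (reachable[table[i].primary] && !reachable[table[i].secondary])
            {
                reachable[table[i].secondary] = true;
                // this promotion may have exposed another pair's primary,
                // so another pass is required
                promoted_something = true;
            }
        }
    }
}
#endif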

void gc_heap::recover_bgc_settings()
{
    if ((settings.condemned_generation < max_generation) && recursive_gc_sync::background_running_p())
    {
        dprintf (2, ("restoring bgc settings"));
        settings = saved_bgc_settings;
        GCHeap::GcCondemnedGeneration = gc_heap::settings.condemned_generation;
    }
}

void gc_heap::allow_fgc()
{
    assert (bgc_thread == GCToEEInterface::GetThread());
    bool bToggleGC = false;

    if (g_fSuspensionPending > 0)
    {
        bToggleGC = GCToEEInterface::EnablePreemptiveGC();
        if (bToggleGC)
        {
            GCToEEInterface::DisablePreemptiveGC();
        }
    }
}

BOOL gc_heap::should_commit_mark_array()
{
    return (recursive_gc_sync::background_running_p() || (current_bgc_state == bgc_initialized));
}

void gc_heap::clear_commit_flag()
{
    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
    while (1)
    {
        if (seg == 0)
        {
            if (gen != large_object_generation)
            {
                gen = large_object_generation;
                seg = heap_segment_in_range (generation_start_segment (gen));
            }
            else
            {
                break;
            }
        }

        if (seg->flags & heap_segment_flags_ma_committed)
        {
            seg->flags &= ~heap_segment_flags_ma_committed;
        }

        if (seg->flags & heap_segment_flags_ma_pcommitted)
        {
            seg->flags &= ~heap_segment_flags_ma_pcommitted;
        }

        seg = heap_segment_next (seg);
    }
}

void gc_heap::clear_commit_flag_global()
{
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        g_heaps[i]->clear_commit_flag();
    }
#else
    clear_commit_flag();
#endif //MULTIPLE_HEAPS
}
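
// A note on the mark array commit flags cleared above:
// heap_segment_flags_ma_committed means the mark array has been committed for
// the whole segment; heap_segment_flags_ma_pcommitted means it has been
// committed only for the portion of the segment that fell inside the
// background GC range (background_saved_lowest/highest_address) at the time -
// see commit_mark_array_new_seg and commit_mark_array_bgc_init below.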

void gc_heap::verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
{
#ifdef _DEBUG
    size_t  markw = mark_word_of (begin);
    size_t  markw_end = mark_word_of (end);

    while (markw < markw_end)
    {
        if (mark_array_addr[markw])
        {
            dprintf  (1, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            markw, mark_array_addr[markw], mark_word_address (markw)));
            FATAL_GC_ERROR();
        }
        markw++;
    }
#else // _DEBUG
    UNREFERENCED_PARAMETER(begin);
    UNREFERENCED_PARAMETER(end);
    UNREFERENCED_PARAMETER(mark_array_addr);
#endif //_DEBUG
}

void gc_heap::verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr)
{
    verify_mark_array_cleared (heap_segment_mem (seg), heap_segment_reserved (seg), mark_array_addr);
}

BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp,
                                         heap_segment* seg,
                                         uint32_t* new_card_table,
                                         uint8_t* new_lowest_address)
{
    UNREFERENCED_PARAMETER(hp); // compiler bug? -- this *is*, indeed, referenced

    uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
    uint8_t* end = heap_segment_reserved (seg);

    uint8_t* lowest = hp->background_saved_lowest_address;
    uint8_t* highest = hp->background_saved_highest_address;

    uint8_t* commit_start = NULL;
    uint8_t* commit_end = NULL;
    size_t commit_flag = 0;

    if ((highest >= start) &&
        (lowest <= end))
    {
        if ((start >= lowest) && (end <= highest))
        {
            dprintf (GC_TABLE_LOG, ("completely in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
                                    start, end, lowest, highest));
            commit_flag = heap_segment_flags_ma_committed;
        }
        else
        {
            dprintf (GC_TABLE_LOG, ("partially in bgc range: seg %Ix-%Ix, bgc: %Ix-%Ix",
                                    start, end, lowest, highest));
            commit_flag = heap_segment_flags_ma_pcommitted;
        }

        commit_start = max (lowest, start);
        commit_end = min (highest, end);

        if (!commit_mark_array_by_range (commit_start, commit_end, hp->mark_array))
        {
            return FALSE;
        }

        if (new_card_table == 0)
        {
            new_card_table = g_gc_card_table;
        }

        if (hp->card_table != new_card_table)
        {
            if (new_lowest_address == 0)
            {
                new_lowest_address = g_gc_lowest_address;
            }

            uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))];
            uint32_t* ma = (uint32_t*)((uint8_t*)card_table_mark_array (ct) - size_mark_array_of (0, new_lowest_address));

            dprintf (GC_TABLE_LOG, ("table realloc-ed: %Ix->%Ix, MA: %Ix->%Ix",
                                    hp->card_table, new_card_table,
                                    hp->mark_array, ma));

            if (!commit_mark_array_by_range (commit_start, commit_end, ma))
            {
                return FALSE;
            }
        }

        seg->flags |= commit_flag;
    }

    return TRUE;
}

BOOL gc_heap::commit_mark_array_by_range (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr)
{
    size_t beg_word = mark_word_of (begin);
    size_t end_word = mark_word_of (align_on_mark_word (end));
    uint8_t* commit_start = align_lower_page ((uint8_t*)&mark_array_addr[beg_word]);
    uint8_t* commit_end = align_on_page ((uint8_t*)&mark_array_addr[end_word]);
    size_t size = (size_t)(commit_end - commit_start);

#ifdef SIMPLE_DPRINTF
    dprintf (GC_TABLE_LOG, ("range: %Ix->%Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), commit %Ix->%Ix(%Id)",
                            begin, end,
                            beg_word, end_word,
                            (end_word - beg_word) * sizeof (uint32_t),
                            &mark_array_addr[beg_word],
                            &mark_array_addr[end_word],
                            (size_t)(&mark_array_addr[end_word] - &mark_array_addr[beg_word]),
                            commit_start, commit_end,
                            size));
#endif //SIMPLE_DPRINTF

    if (virtual_commit (commit_start, size))
    {
        // We can only verify the mark array is cleared from begin to end, the first and the last
        // page aren't necessarily all cleared 'cause they could be used by other segments or
        // card bundle.
        verify_mark_array_cleared (begin, end, mark_array_addr);
        return TRUE;
    }
    else
    {
        dprintf (GC_TABLE_LOG, ("failed to commit %Id bytes", (end_word - beg_word) * sizeof (uint32_t)));
        return FALSE;
    }
}
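
// Illustration only (not compiled into the GC): the commit range computed
// above is the span of mark array bytes covering [begin, end), widened
// *outward* to whole OS pages. A standalone version of that arithmetic is
// sketched below; page_size and bytes_per_mark_word are assumed values for
// the sketch, not the GC's actual constants. Because the range is widened,
// the first and last pages may be shared with neighbouring segments, which is
// why only [begin, end) itself is verified as cleared.
#if 0
static void sketch_commit_range (size_t begin, size_t end, size_t mark_array_base,
                                 size_t* commit_start, size_t* commit_end)
{
    const size_t page_size = 4096;                 // assumed
    const size_t bytes_per_mark_word = 32 * 16;    // assumed: 32 bits/word, 16 heap bytes/bit

    size_t beg_word = begin / bytes_per_mark_word;
    size_t end_word = (end + bytes_per_mark_word - 1) / bytes_per_mark_word;

    // round the start down and the end up to page boundaries
    *commit_start = (mark_array_base + beg_word * sizeof (uint32_t)) & ~(page_size - 1);
    *commit_end   = (mark_array_base + end_word * sizeof (uint32_t) + (page_size - 1)) & ~(page_size - 1);
}
#endif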

BOOL gc_heap::commit_mark_array_with_check (heap_segment* seg, uint32_t* new_mark_array_addr)
{
    uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
    uint8_t* end = heap_segment_reserved (seg);

#ifdef MULTIPLE_HEAPS
    uint8_t* lowest = heap_segment_heap (seg)->background_saved_lowest_address;
    uint8_t* highest = heap_segment_heap (seg)->background_saved_highest_address;
#else
    uint8_t* lowest = background_saved_lowest_address;
    uint8_t* highest = background_saved_highest_address;
#endif //MULTIPLE_HEAPS

    if ((highest >= start) &&
        (lowest <= end))
    {
        start = max (lowest, start);
        end = min (highest, end);
        if (!commit_mark_array_by_range (start, end, new_mark_array_addr))
        {
            return FALSE;
        }
    }

    return TRUE;
}

BOOL gc_heap::commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr)
{
    dprintf (GC_TABLE_LOG, ("seg: %Ix->%Ix; MA: %Ix",
        seg,
        heap_segment_reserved (seg),
        mark_array_addr));
    uint8_t* start = (heap_segment_read_only_p (seg) ? heap_segment_mem (seg) : (uint8_t*)seg);

    return commit_mark_array_by_range (start, heap_segment_reserved (seg), mark_array_addr);
}

BOOL gc_heap::commit_mark_array_bgc_init (uint32_t* mark_array_addr)
{
    UNREFERENCED_PARAMETER(mark_array_addr);

    dprintf (GC_TABLE_LOG, ("BGC init commit: lowest: %Ix, highest: %Ix, mark_array: %Ix",
                            lowest_address, highest_address, mark_array));

    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
    while (1)
    {
        if (seg == 0)
        {
            if (gen != large_object_generation)
            {
                gen = large_object_generation;
                seg = heap_segment_in_range (generation_start_segment (gen));
            }
            else
            {
                break;
            }
        }

        dprintf (GC_TABLE_LOG, ("seg: %Ix, flags: %Id", seg, seg->flags));

        if (!(seg->flags & heap_segment_flags_ma_committed))
        {
            // Read-only segments can be only partially in range, so we could end up
            // calling this at the beginning of every BGC. We are not making this
            // more efficient right now - ro segments are currently only used by redhawk.
            if (heap_segment_read_only_p (seg))
            {
                if ((heap_segment_mem (seg) >= lowest_address) &&
                    (heap_segment_reserved (seg) <= highest_address))
                {
                    if (commit_mark_array_by_seg (seg, mark_array))
                    {
                        seg->flags |= heap_segment_flags_ma_committed;
                    }
                    else
                    {
                        return FALSE;
                    }
                }
                else
                {
                    uint8_t* start = max (lowest_address, heap_segment_mem (seg));
                    uint8_t* end = min (highest_address, heap_segment_reserved (seg));
                    if (commit_mark_array_by_range (start, end, mark_array))
                    {
                        seg->flags |= heap_segment_flags_ma_pcommitted;
                    }
                    else
                    {
                        return FALSE;
                    }
                }
            }
            else
            {
                // Normal segments are by design completely in range, so just
                // commit the whole mark array for each seg.
                if (commit_mark_array_by_seg (seg, mark_array))
                {
                    if (seg->flags & heap_segment_flags_ma_pcommitted)
                    {
                        seg->flags &= ~heap_segment_flags_ma_pcommitted;
                    }
                    seg->flags |= heap_segment_flags_ma_committed;
                }
                else
                {
                    return FALSE;
                }
            }
        }

        seg = heap_segment_next (seg);
    }

    return TRUE;
}

// This function doesn't check the commit flag since it's for a new array -
// the mark_array flag for these segments will remain the same.
BOOL gc_heap::commit_new_mark_array (uint32_t* new_mark_array_addr)
{
    dprintf (GC_TABLE_LOG, ("committing existing segs on MA %Ix", new_mark_array_addr));
    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));
    while (1)
    {
        if (seg == 0)
        {
            if (gen != large_object_generation)
            {
                gen = large_object_generation;
                seg = heap_segment_in_range (generation_start_segment (gen));
            }
            else
            {
                break;
            }
        }

        if (!commit_mark_array_with_check (seg, new_mark_array_addr))
        {
            return FALSE;
        }

        seg = heap_segment_next (seg);
    }

#ifdef MULTIPLE_HEAPS
    if (new_heap_segment)
    {
        if (!commit_mark_array_with_check (new_heap_segment, new_mark_array_addr))
        {
            return FALSE;
        }
    }
#endif //MULTIPLE_HEAPS

    return TRUE;
}

BOOL gc_heap::commit_new_mark_array_global (uint32_t* new_mark_array)
{
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        if (!g_heaps[i]->commit_new_mark_array (new_mark_array))
        {
            return FALSE;
        }
    }
#else
    if (!commit_new_mark_array (new_mark_array))
    {
        return FALSE;
    }
#endif //MULTIPLE_HEAPS

    return TRUE;
}

void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
{
    // if BGC is disabled (the finalize watchdog does this at shutdown), the mark array could have
    // been set to NULL.
    if (mark_array == NULL)
    {
        return;
    }

    dprintf (GC_TABLE_LOG, ("decommitting seg %Ix(%Ix), MA: %Ix", seg, seg->flags, mark_array));

    size_t flags = seg->flags;

    if ((flags & heap_segment_flags_ma_committed) ||
        (flags & heap_segment_flags_ma_pcommitted))
    {
        uint8_t* start = (heap_segment_read_only_p(seg) ? heap_segment_mem(seg) : (uint8_t*)seg);
        uint8_t* end = heap_segment_reserved (seg);

        if (flags & heap_segment_flags_ma_pcommitted)
        {
            start = max (lowest_address, start);
            end = min (highest_address, end);
        }

        size_t beg_word = mark_word_of (start);
        size_t end_word = mark_word_of (align_on_mark_word (end));
        uint8_t* decommit_start = align_on_page ((uint8_t*)&mark_array[beg_word]);
        uint8_t* decommit_end = align_lower_page ((uint8_t*)&mark_array[end_word]);
        size_t size = (size_t)(decommit_end - decommit_start);

#ifdef SIMPLE_DPRINTF
        dprintf (GC_TABLE_LOG, ("seg: %Ix mark word: %Ix->%Ix(%Id), mark array: %Ix->%Ix(%Id), decommit %Ix->%Ix(%Id)",
                                seg,
                                beg_word, end_word,
                                (end_word - beg_word) * sizeof (uint32_t),
                                &mark_array[beg_word],
                                &mark_array[end_word],
                                (size_t)(&mark_array[end_word] - &mark_array[beg_word]),
                                decommit_start, decommit_end,
                                size));
#endif //SIMPLE_DPRINTF

        if (decommit_start < decommit_end)
        {
            if (!virtual_decommit (decommit_start, size))
            {
                dprintf (GC_TABLE_LOG, ("decommit on %Ix for %Id bytes failed",
                                        decommit_start, size));
                assert (!"decommit failed");
            }
        }

        dprintf (GC_TABLE_LOG, ("decommited [%Ix for address [%Ix", beg_word, seg));
    }
}
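
// Note the asymmetry with commit_mark_array_by_range above: when committing,
// the range is widened outward to page boundaries (align_lower_page for the
// start, align_on_page for the end), but when decommitting it is narrowed
// inward (align_on_page for the start, align_lower_page for the end). Pages
// that may be shared with the mark array of an adjacent segment are therefore
// committed but never decommitted, and the narrowed range can be empty -
// hence the (decommit_start < decommit_end) check.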

void gc_heap::background_mark_phase ()
{
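    // Outline of this phase, as implemented below:
    // 1. With the EE suspended, do the initial non-concurrent marking of stack
    //    roots and finalization roots (recorded into the c_mark_list).
    // 2. Reset write watch (for software write watch, while still suspended),
    //    set cm_in_progress and restart the EE.
    // 3. Concurrently mark sized ref and other handles, drain the c_mark_list,
    //    revisit dirtied pages and process mark stack overflow.
    // 4. Suspend the EE again and do the final non-concurrent marking: stack
    //    roots, handles, dirtied pages, dependent handles, finalization and
    //    weak references.
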
    verify_mark_array_cleared();

    ScanContext sc;
    sc.thread_number = heap_number;
    sc.promotion = TRUE;
    sc.concurrent = FALSE;

    THREAD_FROM_HEAP;
    BOOL cooperative_mode = TRUE;
#ifndef MULTIPLE_HEAPS
    const int thread = heap_number;
#endif //!MULTIPLE_HEAPS

    dprintf(2,("-(GC%d)BMark-", VolatileLoad(&settings.gc_index)));

    assert (settings.concurrent);

#ifdef TIME_GC
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#endif //TIME_GC

    if (gen0_must_clear_bricks > 0)
        gen0_must_clear_bricks--;

    background_soh_alloc_count = 0;
    background_loh_alloc_count = 0;
    bgc_overflow_count = 0;

    bpromoted_bytes (heap_number) = 0;
    static uint32_t num_sizedrefs = 0;

    background_min_overflow_address = MAX_PTR;
    background_max_overflow_address = 0;
    background_min_soh_overflow_address = MAX_PTR;
    background_max_soh_overflow_address = 0;
    processed_soh_overflow_p = FALSE;

    {
        //set up the mark lists from g_mark_list
        assert (g_mark_list);
        mark_list = g_mark_list;
        //don't use the mark list for full gc
        //because multiple segments are more complex to handle and the list
        //is likely to overflow
        mark_list_end = &mark_list [0];
        mark_list_index = &mark_list [0];

        c_mark_list_index = 0;

#ifndef MULTIPLE_HEAPS
        shigh = (uint8_t*) 0;
        slow  = MAX_PTR;
#endif //MULTIPLE_HEAPS

        generation*   gen = generation_of (max_generation);

        dprintf(3,("BGC: stack marking"));
        sc.concurrent = TRUE;

        GCScan::GcScanRoots(background_promote_callback,
                                max_generation, max_generation,
                                &sc);
    }

    {
        dprintf(3,("BGC: finalization marking"));
        finalize_queue->GcScanRoots(background_promote_callback, heap_number, 0);
    }

    size_t total_loh_size = generation_size (max_generation + 1);
    bgc_begin_loh_size = total_loh_size;
    bgc_alloc_spin_loh = 0;
    bgc_loh_size_increased = 0;
    bgc_loh_allocated_in_free = 0;
    size_t total_soh_size = generation_sizes (generation_of (max_generation));

    dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));

    {
        //concurrent_print_time_delta ("copying stack roots");
        concurrent_print_time_delta ("CS");

        FIRE_EVENT(BGC1stNonConEnd);

        expanded_in_fgc = FALSE;
        saved_overflow_ephemeral_seg = 0;
        current_bgc_state = bgc_reset_ww;

        // We used to think we didn't need a join here - that whichever thread got here
        // first could change the states and call restart_vm. That is not true - we can't
        // let the EE run while we are scanning the stacks. Since we now allow resetting
        // write watch to run concurrently and have a join for it, we can restart the EE
        // on the 1st thread that gets here. Make sure we handle the sizedref handles
        // correctly.
#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_restart_ee);
        if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
            // Resetting write watch for software write watch is pretty fast, much faster than for hardware write watch. Reset
            // can be done while the runtime is suspended or after the runtime is restarted; the preference was to reset while
            // the runtime is suspended. The reset for hardware write watch is done after the runtime is restarted below.
#ifdef WRITE_WATCH
            concurrent_print_time_delta ("CRWW begin");

#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps[i]->reset_write_watch (FALSE);
            }
#else
            reset_write_watch (FALSE);
#endif //MULTIPLE_HEAPS

            concurrent_print_time_delta ("CRWW");
#endif //WRITE_WATCH
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

            num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();

            // this c_write is not really necessary because restart_vm
            // has an instruction that will flush the cpu cache (interlocked
            // or whatever) but we don't want to rely on that.
            dprintf (GTC_LOG, ("setting cm_in_progress"));
            c_write (cm_in_progress, TRUE);

            //restart all threads, doing the marking from the array
            assert (dont_restart_ee_p);
            dont_restart_ee_p = FALSE;

            restart_vm();
            GCToOSInterface::YieldThread (0);
#ifdef MULTIPLE_HEAPS
            dprintf(3, ("Starting all gc threads for gc"));
            bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }

#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_after_reset);
        if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
            disable_preemptive (true);

#ifndef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
            // When software write watch is enabled, resetting write watch is done while the runtime is suspended above. The
            // post-reset call to revisit_written_pages is only necessary for concurrent reset_write_watch, to discard dirtied
            // pages during the concurrent reset.

#ifdef WRITE_WATCH
            concurrent_print_time_delta ("CRWW begin");

#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps[i]->reset_write_watch (TRUE);
            }
#else
            reset_write_watch (TRUE);
#endif //MULTIPLE_HEAPS

            concurrent_print_time_delta ("CRWW");
#endif //WRITE_WATCH

#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps[i]->revisit_written_pages (TRUE, TRUE);
            }
#else
            revisit_written_pages (TRUE, TRUE);
#endif //MULTIPLE_HEAPS

            concurrent_print_time_delta ("CRW");
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < n_heaps; i++)
            {
                g_heaps[i]->current_bgc_state = bgc_mark_handles;
            }
#else
            current_bgc_state = bgc_mark_handles;
#endif //MULTIPLE_HEAPS

            current_c_gc_state = c_gc_state_marking;

            enable_preemptive ();

#ifdef MULTIPLE_HEAPS
            dprintf(3, ("Joining BGC threads after resetting writewatch"));
            bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }

        disable_preemptive (true);

        if (num_sizedrefs > 0)
        {
            GCScan::GcScanSizedRefs(background_promote, max_generation, max_generation, &sc);

            enable_preemptive ();

#ifdef MULTIPLE_HEAPS
            bgc_t_join.join(this, gc_join_scan_sizedref_done);
            if (bgc_t_join.joined())
            {
                dprintf(3, ("Done with marking all sized refs. Starting all bgc thread for marking other strong roots"));
                bgc_t_join.restart();
            }
#endif //MULTIPLE_HEAPS

            disable_preemptive (true);
        }

        dprintf (3,("BGC: handle table marking"));
        GCScan::GcScanHandles(background_promote,
                                  max_generation, max_generation,
                                  &sc);
        //concurrent_print_time_delta ("concurrent marking handle table");
        concurrent_print_time_delta ("CRH");

        current_bgc_state = bgc_mark_stack;
        dprintf (2,("concurrent draining mark list"));
        background_drain_mark_list (thread);
        //concurrent_print_time_delta ("concurrent marking stack roots");
        concurrent_print_time_delta ("CRS");

        dprintf (2,("concurrent revisiting dirtied pages"));
        revisit_written_pages (TRUE);
        revisit_written_pages (TRUE);
        //concurrent_print_time_delta ("concurrent marking dirtied pages on LOH");
        concurrent_print_time_delta ("CRre");

        enable_preemptive ();

#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_concurrent_overflow);
        if (bgc_t_join.joined())
        {
            uint8_t* all_heaps_max = 0;
            uint8_t* all_heaps_min = MAX_PTR;
            int i;
            for (i = 0; i < n_heaps; i++)
            {
                dprintf (3, ("heap %d overflow max is %Ix, min is %Ix",
                    i,
                    g_heaps[i]->background_max_overflow_address,
                    g_heaps[i]->background_min_overflow_address));
                if (all_heaps_max < g_heaps[i]->background_max_overflow_address)
                    all_heaps_max = g_heaps[i]->background_max_overflow_address;
                if (all_heaps_min > g_heaps[i]->background_min_overflow_address)
                    all_heaps_min = g_heaps[i]->background_min_overflow_address;
            }
            for (i = 0; i < n_heaps; i++)
            {
                g_heaps[i]->background_max_overflow_address = all_heaps_max;
                g_heaps[i]->background_min_overflow_address = all_heaps_min;
            }
            dprintf(3, ("Starting all bgc threads after updating the overflow info"));
            bgc_t_join.restart();
        }
#endif //MULTIPLE_HEAPS

        disable_preemptive (true);

        dprintf (2, ("before CRov count: %d", bgc_overflow_count));
        bgc_overflow_count = 0;
        background_process_mark_overflow (TRUE);
        dprintf (2, ("after CRov count: %d", bgc_overflow_count));
        bgc_overflow_count = 0;
        //concurrent_print_time_delta ("concurrent processing mark overflow");
        concurrent_print_time_delta ("CRov");

        // Stop all threads, crawl all stacks and revisit changed pages.
        FIRE_EVENT(BGC1stConEnd);

        dprintf (2, ("Stopping the EE"));

        enable_preemptive ();

#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_suspend_ee);
        if (bgc_t_join.joined())
        {
            bgc_threads_sync_event.Reset();

            dprintf(3, ("Joining BGC threads for non concurrent final marking"));
            bgc_t_join.restart();
        }
#endif //MULTIPLE_HEAPS

        if (heap_number == 0)
        {
            enter_spin_lock (&gc_lock);

            bgc_suspend_EE ();
            //suspend_EE ();
            bgc_threads_sync_event.Set();
        }
        else
        {
            bgc_threads_sync_event.Wait(INFINITE, FALSE);
            dprintf (2, ("bgc_threads_sync_event is signalled"));
        }

        assert (settings.concurrent);
        assert (settings.condemned_generation == max_generation);

        dprintf (2, ("clearing cm_in_progress"));
        c_write (cm_in_progress, FALSE);

        bgc_alloc_lock->check();

        current_bgc_state = bgc_final_marking;

        //concurrent_print_time_delta ("concurrent marking ended");
        concurrent_print_time_delta ("CR");

        FIRE_EVENT(BGC2ndNonConBegin);

        mark_absorb_new_alloc();

        // We need a join here 'cause find_object would complain if the gen0
        // bricks of another heap haven't been fixed up. So we need to make sure
        // that every heap's gen0 bricks are fixed up before we proceed.
#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_after_absorb);
        if (bgc_t_join.joined())
        {
#ifdef BGC_SERVO_TUNING
            bgc_tuning::record_bgc_sweep_start();
#endif //BGC_SERVO_TUNING

            dprintf(3, ("Joining BGC threads after absorb"));
            bgc_t_join.restart();
        }
#endif //MULTIPLE_HEAPS

        // give VM a chance to do work
        GCToEEInterface::GcBeforeBGCSweepWork();

        //reset the flag, indicating that the EE no longer expects concurrent
        //marking
        sc.concurrent = FALSE;

        total_loh_size = generation_size (max_generation + 1);
        total_soh_size = generation_sizes (generation_of (max_generation));

        dprintf (GTC_LOG, ("FM: h%d: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));

        dprintf (2, ("nonconcurrent marking stack roots"));
        GCScan::GcScanRoots(background_promote,
                                max_generation, max_generation,
                                &sc);
        //concurrent_print_time_delta ("nonconcurrent marking stack roots");
        concurrent_print_time_delta ("NRS");

//        finalize_queue->EnterFinalizeLock();
        finalize_queue->GcScanRoots(background_promote, heap_number, 0);
//        finalize_queue->LeaveFinalizeLock();

        dprintf (2, ("nonconcurrent marking handle table"));
        GCScan::GcScanHandles(background_promote,
                                  max_generation, max_generation,
                                  &sc);
        //concurrent_print_time_delta ("nonconcurrent marking handle table");
        concurrent_print_time_delta ("NRH");

        dprintf (2,("---- (GC%d)final going through written pages ----", VolatileLoad(&settings.gc_index)));
        revisit_written_pages (FALSE);
        //concurrent_print_time_delta ("nonconcurrent revisit dirtied pages on LOH");
        concurrent_print_time_delta ("NRre LOH");

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_disable_software_write_watch);
        if (bgc_t_join.joined())
#endif // MULTIPLE_HEAPS
        {
            // The runtime is suspended, and we will be doing a final query of dirty pages, so pause tracking written pages to
            // avoid further perf penalty after the runtime is restarted
            SoftwareWriteWatch::DisableForGCHeap();

#ifdef MULTIPLE_HEAPS
            dprintf(3, ("Restarting BGC threads after disabling software write watch"));
            bgc_t_join.restart();
#endif // MULTIPLE_HEAPS
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        dprintf (2, ("before NR 1st Hov count: %d", bgc_overflow_count));
        bgc_overflow_count = 0;

        // Dependent handles need to be scanned with a special algorithm (see the header comment on
        // scan_dependent_handles for more detail). We perform an initial scan without processing any mark
        // stack overflow. This is not guaranteed to complete the operation but in a common case (where there
        // are no dependent handles that are due to be collected) it allows us to optimize away further scans.
        // The call to background_scan_dependent_handles is what will cycle through more iterations if
        // required and will also perform processing of any mark stack overflow once the dependent handle
        // table has been fully promoted.
        dprintf (2, ("1st dependent handle scan and process mark overflow"));
        GCScan::GcDhInitialScan(background_promote, max_generation, max_generation, &sc);
        background_scan_dependent_handles (&sc);
        //concurrent_print_time_delta ("1st nonconcurrent dependent handle scan and process mark overflow");
        concurrent_print_time_delta ("NR 1st Hov");

        dprintf (2, ("after NR 1st Hov count: %d", bgc_overflow_count));
        bgc_overflow_count = 0;

#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_null_dead_short_weak);
        if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
            GCToEEInterface::AfterGcScanRoots (max_generation, max_generation, &sc);

#ifdef MULTIPLE_HEAPS
            dprintf(3, ("Joining BGC threads for short weak handle scan"));
            bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }

        // null out the targets of short weakrefs that were not promoted.
        GCScan::GcShortWeakPtrScan(background_promote, max_generation, max_generation,&sc);

        //concurrent_print_time_delta ("bgc GcShortWeakPtrScan");
        concurrent_print_time_delta ("NR GcShortWeakPtrScan");
    }

    {
#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_scan_finalization);
        if (bgc_t_join.joined())
        {
            dprintf(3, ("Joining BGC threads for finalization"));
            bgc_t_join.restart();
        }
#endif //MULTIPLE_HEAPS

        //Handle finalization.
        dprintf(3,("Marking finalization data"));
        //concurrent_print_time_delta ("bgc joined to mark finalization");
        concurrent_print_time_delta ("NRj");

//        finalize_queue->EnterFinalizeLock();
        finalize_queue->ScanForFinalization (background_promote, max_generation, FALSE, __this);
//        finalize_queue->LeaveFinalizeLock();

        concurrent_print_time_delta ("NRF");
    }

    dprintf (2, ("before NR 2nd Hov count: %d", bgc_overflow_count));
    bgc_overflow_count = 0;

    // Scan dependent handles again to promote any secondaries associated with primaries that were promoted
    // for finalization. As before background_scan_dependent_handles will also process any mark stack
    // overflow.
    dprintf (2, ("2nd dependent handle scan and process mark overflow"));
    background_scan_dependent_handles (&sc);
    //concurrent_print_time_delta ("2nd nonconcurrent dependent handle scan and process mark overflow");
    concurrent_print_time_delta ("NR 2nd Hov");

#ifdef MULTIPLE_HEAPS
    bgc_t_join.join(this, gc_join_null_dead_long_weak);
    if (bgc_t_join.joined())
    {
        dprintf(2, ("Joining BGC threads for weak pointer deletion"));
        bgc_t_join.restart();
    }
#endif //MULTIPLE_HEAPS

    // null out the targets of long weakrefs that were not promoted.
    GCScan::GcWeakPtrScan (background_promote, max_generation, max_generation, &sc);
    concurrent_print_time_delta ("NR GcWeakPtrScan");

#ifdef MULTIPLE_HEAPS
    bgc_t_join.join(this, gc_join_null_dead_syncblk);
    if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
        dprintf (2, ("calling GcWeakPtrScanBySingleThread"));
        // scan for deleted entries in the syncblk cache
        GCScan::GcWeakPtrScanBySingleThread (max_generation, max_generation, &sc);
        concurrent_print_time_delta ("NR GcWeakPtrScanBySingleThread");
#ifdef MULTIPLE_HEAPS
        dprintf(2, ("Starting BGC threads for end of background mark phase"));
        bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    gen0_bricks_cleared = FALSE;

    dprintf (2, ("end of bgc mark: loh: %d, soh: %d",
                 generation_size (max_generation + 1),
                 generation_sizes (generation_of (max_generation))));

    for (int gen_idx = max_generation; gen_idx <= (max_generation + 1); gen_idx++)
    {
        generation* gen = generation_of (gen_idx);
        dynamic_data* dd = dynamic_data_of (gen_idx);
        dd_begin_data_size (dd) = generation_size (gen_idx) -
                                   (generation_free_list_space (gen) + generation_free_obj_space (gen)) -
                                   Align (size (generation_allocation_start (gen)));
        dd_survived_size (dd) = 0;
        dd_pinned_survived_size (dd) = 0;
        dd_artificial_pinned_survived_size (dd) = 0;
        dd_added_pinned_size (dd) = 0;
    }

    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
    PREFIX_ASSUME(seg != NULL);

    while (seg)
    {
        seg->flags &= ~heap_segment_flags_swept;

        if (heap_segment_allocated (seg) == heap_segment_mem (seg))
        {
            // This can't happen...
            FATAL_GC_ERROR();
        }

        if (seg == ephemeral_heap_segment)
        {
            heap_segment_background_allocated (seg) = generation_allocation_start (generation_of (max_generation - 1));
        }
        else
        {
            heap_segment_background_allocated (seg) = heap_segment_allocated (seg);
        }

        dprintf (2, ("seg %Ix background allocated is %Ix",
                      heap_segment_mem (seg),
                      heap_segment_background_allocated (seg)));
        seg = heap_segment_next_rw (seg);
    }

    // We need to void alloc contexts here 'cause while background_ephemeral_sweep is running
    // we can't let the user code consume the left over parts in these alloc contexts.
    repair_allocation_contexts (FALSE);

#ifdef TIME_GC
    finish = GetCycleCount32();
    mark_time = finish - start;
#endif //TIME_GC

    dprintf (2, ("end of bgc mark: gen2 free list space: %d, free obj space: %d",
        generation_free_list_space (generation_of (max_generation)),
        generation_free_obj_space (generation_of (max_generation))));

    dprintf(2,("---- (GC%d)End of background mark phase ----", VolatileLoad(&settings.gc_index)));
}

void
gc_heap::suspend_EE ()
{
    dprintf (2, ("suspend_EE"));
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
    GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
#else
    GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
#endif //MULTIPLE_HEAPS
}

#ifdef MULTIPLE_HEAPS
void
gc_heap::bgc_suspend_EE ()
{
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap::g_heaps[i]->reset_gc_done();
    }
    gc_started = TRUE;
    dprintf (2, ("bgc_suspend_EE"));
    GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);

    gc_started = FALSE;
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap::g_heaps[i]->set_gc_done();
    }
}
#else
void
gc_heap::bgc_suspend_EE ()
{
    reset_gc_done();
    gc_started = TRUE;
    dprintf (2, ("bgc_suspend_EE"));
    GCToEEInterface::SuspendEE(SUSPEND_FOR_GC_PREP);
    gc_started = FALSE;
    set_gc_done();
}
#endif //MULTIPLE_HEAPS

void
gc_heap::restart_EE ()
{
    dprintf (2, ("restart_EE"));
#ifdef MULTIPLE_HEAPS
    GCToEEInterface::RestartEE(FALSE);
#else
    GCToEEInterface::RestartEE(FALSE);
#endif //MULTIPLE_HEAPS
}

inline uint8_t* gc_heap::high_page ( heap_segment* seg, BOOL concurrent_p)
{
    if (concurrent_p)
    {
        uint8_t* end = ((seg == ephemeral_heap_segment) ?
                     generation_allocation_start (generation_of (max_generation-1)) :
                     heap_segment_allocated (seg));
        return align_lower_page (end);
    }
    else
    {
        return heap_segment_allocated (seg);
    }
}

void gc_heap::revisit_written_page (uint8_t* page,
                                    uint8_t* end,
                                    BOOL concurrent_p,
                                    heap_segment* seg,
                                    uint8_t*& last_page,
                                    uint8_t*& last_object,
                                    BOOL large_objects_p,
                                    size_t& num_marked_objects)
{
    UNREFERENCED_PARAMETER(seg);

    uint8_t*   start_address = page;
    uint8_t*   o             = 0;
    int align_const = get_alignment_constant (!large_objects_p);
    uint8_t* high_address = end;
    uint8_t* current_lowest_address = background_saved_lowest_address;
    uint8_t* current_highest_address = background_saved_highest_address;
    BOOL no_more_loop_p = FALSE;

    THREAD_FROM_HEAP;
#ifndef MULTIPLE_HEAPS
    const int thread = heap_number;
#endif //!MULTIPLE_HEAPS

    if (large_objects_p)
    {
        o = last_object;
    }
    else
    {
        if (((last_page + WRITE_WATCH_UNIT_SIZE) == page)
            || (start_address <= last_object))
        {
            o = last_object;
        }
        else
        {
            o = find_first_object (start_address, last_object);
            // We can visit the same object again, but on a different page.
            assert (o >= last_object);
        }
    }

    dprintf (3,("page %Ix start: %Ix, %Ix[ ",
               (size_t)page, (size_t)o,
               (size_t)(min (high_address, page + WRITE_WATCH_UNIT_SIZE))));

    while (o < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
    {
        size_t s;

        if (concurrent_p && large_objects_p)
        {
            bgc_alloc_lock->bgc_mark_set (o);

            if (((CObjectHeader*)o)->IsFree())
            {
                s = unused_array_size (o);
            }
            else
            {
                s = size (o);
            }
        }
        else
        {
            s = size (o);
        }

        dprintf (3,("Considering object %Ix(%s)", (size_t)o, (background_object_marked (o, FALSE) ? "bm" : "nbm")));

        assert (Align (s) >= Align (min_obj_size));

        uint8_t* next_o =  o + Align (s, align_const);

        if (next_o >= start_address)
        {
#ifdef MULTIPLE_HEAPS
            if (concurrent_p)
            {
                // We set last_object here for SVR BGC because SVR BGC has more than
                // one GC thread. When we have more than one GC thread we would run into this
                // situation if we skipped unmarked objects:
                // bgc thread 1 calls GWW, detects object X is not marked, so it skips it
                // for revisit.
                // bgc thread 2 marks X and all its current children.
                // user thread comes along and dirties more (and later) pages in X.
                // bgc thread 1 calls GWW again and gets those later pages but it will not mark anything
                // on them because it had already skipped X. We need to detect that this object is now
                // marked and mark the children on the dirtied pages.
                // In the future if we have fewer BGC threads than heaps we should make
                // this check depend on the number of BGC threads.
                last_object = o;
            }
#endif //MULTIPLE_HEAPS

            if (contain_pointers (o) &&
                (!((o >= current_lowest_address) && (o < current_highest_address)) ||
                background_marked (o)))
            {
                dprintf (3, ("going through %Ix", (size_t)o));
                go_through_object (method_table(o), o, s, poo, start_address, use_start, (o + s),
                                    if ((uint8_t*)poo >= min (high_address, page + WRITE_WATCH_UNIT_SIZE))
                                    {
                                        no_more_loop_p = TRUE;
                                        goto end_limit;
                                    }
                                    uint8_t* oo = *poo;

                                    num_marked_objects++;
                                    background_mark_object (oo THREAD_NUMBER_ARG);
                                );
            }
            else if (
                concurrent_p &&
                ((CObjectHeader*)o)->IsFree() &&
                (next_o > min (high_address, page + WRITE_WATCH_UNIT_SIZE)))
            {
                // We must not skip the object here because of this corner scenario:
                // A large object was being allocated during BGC mark so we first made it
                // into a free object, then cleared its memory. In this loop we would detect
                // that it's a free object, which normally we would skip. But by the next time
                // we call GetWriteWatch we could still be on this object, and the object could
                // have been made into a valid object with some of its memory changed. We need
                // to be sure to process those written pages so we can't skip the object just
                // yet.
                //
                // Similarly, when using software write watch, don't advance last_object when
                // the current object is a free object that spans beyond the current page or
                // high_address. Software write watch acquires gc_lock before the concurrent
                // GetWriteWatch() call during revisit_written_pages(). A foreground GC may
                // happen at that point and allocate from this free region, so when
                // revisit_written_pages() continues, it cannot skip now-valid objects in this
                // region.
                no_more_loop_p = TRUE;
                goto end_limit;
            }
        }
end_limit:
        if (concurrent_p && large_objects_p)
        {
            bgc_alloc_lock->bgc_mark_done ();
        }
        if (no_more_loop_p)
        {
            break;
        }
        o = next_o;
    }

#ifdef MULTIPLE_HEAPS
    if (concurrent_p)
    {
        assert (last_object < (min (high_address, page + WRITE_WATCH_UNIT_SIZE)));
    }
    else
#endif //MULTIPLE_HEAPS
    {
        last_object = o;
    }

    dprintf (3,("Last object: %Ix", (size_t)last_object));
    last_page = align_write_watch_lower_page (o);

    if (concurrent_p)
    {
        allow_fgc();
    }
}

// When reset_only_p is TRUE, we should only reset pages that are in range
// because segments (or parts of segments) that were allocated out of range
// must all be considered live.
void gc_heap::revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p)
{
#ifdef WRITE_WATCH
    if (concurrent_p && !reset_only_p)
    {
        current_bgc_state = bgc_revisit_soh;
    }

    size_t total_dirtied_pages = 0;
    size_t total_marked_objects = 0;

    heap_segment* seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));

    PREFIX_ASSUME(seg != NULL);

    bool reset_watch_state = !!concurrent_p;
    bool is_runtime_suspended = !concurrent_p;
    BOOL small_object_segments = TRUE;
    int align_const = get_alignment_constant (small_object_segments);

    while (1)
    {
        if (seg == 0)
        {
            if (small_object_segments)
            {
                //switch to large segment
                if (concurrent_p && !reset_only_p)
                {
                    current_bgc_state = bgc_revisit_loh;
                }

                if (!reset_only_p)
                {
                    dprintf (GTC_LOG, ("h%d: SOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
                    fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
                    concurrent_print_time_delta (concurrent_p ? "CR SOH" : "NR SOH");
                    total_dirtied_pages = 0;
                    total_marked_objects = 0;
                }

                small_object_segments = FALSE;
                //concurrent_print_time_delta (concurrent_p ? "concurrent marking dirtied pages on SOH" : "nonconcurrent marking dirtied pages on SOH");

                dprintf (3, ("now revisiting large object segments"));
                align_const = get_alignment_constant (small_object_segments);
                seg = heap_segment_rw (generation_start_segment (large_object_generation));

                PREFIX_ASSUME(seg != NULL);

                continue;
            }
            else
            {
                if (reset_only_p)
                {
                    dprintf (GTC_LOG, ("h%d: tdp: %Id", heap_number, total_dirtied_pages));
                }
                else
                {
                    dprintf (GTC_LOG, ("h%d: LOH: dp:%Id; mo: %Id", heap_number, total_dirtied_pages, total_marked_objects));
                    fire_revisit_event (total_dirtied_pages, total_marked_objects, !small_object_segments);
                }
                break;
            }
        }
        uint8_t* base_address = (uint8_t*)heap_segment_mem (seg);
        //we need to truncate to the base of the page because
        //some newly allocated objects could exist beyond heap_segment_allocated
        //and if we reset the write watch status of the last page,
        //they wouldn't be guaranteed to be visited -> gc hole.
        uintptr_t bcount = array_size;
        uint8_t* last_page = 0;
        uint8_t* last_object = heap_segment_mem (seg);
        uint8_t* high_address = 0;

        BOOL skip_seg_p = FALSE;

        if (reset_only_p)
        {
            if ((heap_segment_mem (seg) >= background_saved_lowest_address) ||
                (heap_segment_reserved (seg) <= background_saved_highest_address))
            {
                dprintf (3, ("h%d: sseg: %Ix(-%Ix)", heap_number,
                    heap_segment_mem (seg), heap_segment_reserved (seg)));
                skip_seg_p = TRUE;
            }
        }

        if (!skip_seg_p)
        {
            dprintf (3, ("looking at seg %Ix", (size_t)last_object));

            if (reset_only_p)
            {
                base_address = max (base_address, background_saved_lowest_address);
                dprintf (3, ("h%d: reset only starting %Ix", heap_number, base_address));
            }

            dprintf (3, ("h%d: starting: %Ix, seg %Ix-%Ix", heap_number, base_address,
                heap_segment_mem (seg), heap_segment_reserved (seg)));


            while (1)
            {
                if (reset_only_p)
                {
                    high_address = ((seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg));
                    high_address = min (high_address, background_saved_highest_address);
                }
                else
                {
                    high_address = high_page (seg, concurrent_p);
                }

                if ((base_address < high_address) &&
                    (bcount >= array_size))
                {
                    ptrdiff_t region_size = high_address - base_address;
                    dprintf (3, ("h%d: gw: [%Ix(%Id)", heap_number, (size_t)base_address, (size_t)region_size));

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
                    // When the runtime is not suspended, it's possible for the table to be resized concurrently with the scan
                    // for dirty pages below. Prevent that by synchronizing with grow_brick_card_tables(). When the runtime is
                    // suspended, it's ok to scan for dirty pages concurrently from multiple background GC threads for disjoint
                    // memory regions.
                    if (!is_runtime_suspended)
                    {
                        enter_spin_lock(&gc_lock);
                    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

                    get_write_watch_for_gc_heap (reset_watch_state, base_address, region_size,
                                                 (void**)background_written_addresses,
                                                 &bcount, is_runtime_suspended);

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
                    if (!is_runtime_suspended)
                    {
                        leave_spin_lock(&gc_lock);
                    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

                    if (bcount != 0)
                    {
                        total_dirtied_pages += bcount;

                        dprintf (3, ("Found %d pages [%Ix, %Ix[",
                                        bcount, (size_t)base_address, (size_t)high_address));
                    }

                    if (!reset_only_p)
                    {
                        for (unsigned i = 0; i < bcount; i++)
                        {
                            uint8_t* page = (uint8_t*)background_written_addresses[i];
                            dprintf (3, ("looking at page %d at %Ix(h: %Ix)", i,
                                (size_t)page, (size_t)high_address));
                            if (page < high_address)
                            {
                                //search for marked objects in the page
                                revisit_written_page (page, high_address, concurrent_p,
                                                    seg, last_page, last_object,
                                                    !small_object_segments,
                                                    total_marked_objects);
                            }
                            else
                            {
                                dprintf (3, ("page %d at %Ix is >= %Ix!", i, (size_t)page, (size_t)high_address));
                                assert (!"page shouldn't have exceeded limit");
                            }
                        }
                    }

                    if (bcount >= array_size)
                    {
                        base_address = background_written_addresses [array_size-1] + WRITE_WATCH_UNIT_SIZE;
                        bcount = array_size;
                    }
                }
                else
                {
                    break;
                }
            }
        }

        seg = heap_segment_next_rw (seg);
    }

#endif //WRITE_WATCH
}
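
// Illustration only (not compiled into the GC): the batched write-watch walk
// in the inner loop above, reduced to its core. get_dirty_pages and
// page_capacity are hypothetical stand-ins for get_write_watch_for_gc_heap
// and array_size; WRITE_WATCH_UNIT_SIZE is the page granularity tracked by
// write watch.
#if 0
static void sketch_walk_dirty_pages (uint8_t* base, uint8_t* high,
                                     uint8_t** buffer, size_t page_capacity)
{
    size_t count = page_capacity;
    while ((base < high) && (count >= page_capacity))
    {
        // returns up to page_capacity dirtied pages in [base, high)
        count = get_dirty_pages (base, (size_t)(high - base), buffer, page_capacity);

        for (size_t i = 0; i < count; i++)
        {
            // revisit buffer[i] ... (revisit_written_page in the real code)
        }

        if (count >= page_capacity)
        {
            // the batch filled up, so there may be more dirtied pages -
            // resume the query just past the last page returned
            base = buffer[page_capacity - 1] + WRITE_WATCH_UNIT_SIZE;
        }
    }
}
#endif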

void gc_heap::background_grow_c_mark_list()
{
    assert (c_mark_list_index >= c_mark_list_length);
    BOOL should_drain_p = FALSE;
    THREAD_FROM_HEAP;
#ifndef MULTIPLE_HEAPS
    const int thread = heap_number;
#endif //!MULTIPLE_HEAPS

    dprintf (2, ("stack copy buffer overflow"));
    uint8_t** new_c_mark_list = 0;
    {
        FAULT_NOT_FATAL();
        if (c_mark_list_length >= (SIZE_T_MAX / (2 * sizeof (uint8_t*))))
        {
            should_drain_p = TRUE;
        }
        else
        {
            new_c_mark_list = new (nothrow) uint8_t*[c_mark_list_length*2];
            if (new_c_mark_list == 0)
            {
                should_drain_p = TRUE;
            }
        }
    }
    if (should_drain_p)
    {
        dprintf (2, ("No more memory for the stacks copy, draining.."));
        //drain the list by marking its elements
        background_drain_mark_list (thread);
    }
    else
    {
        assert (new_c_mark_list);
        memcpy (new_c_mark_list, c_mark_list, c_mark_list_length*sizeof(uint8_t*));
        c_mark_list_length = c_mark_list_length*2;
        delete [] c_mark_list;
        c_mark_list = new_c_mark_list;
    }
}

void gc_heap::background_promote_callback (Object** ppObject, ScanContext* sc,
                                  uint32_t flags)
{
    UNREFERENCED_PARAMETER(sc);
    //in order to save space on the array, mark the object,
    //knowing that it will be visited later
    assert (settings.concurrent);

    THREAD_NUMBER_FROM_CONTEXT;
#ifndef MULTIPLE_HEAPS
    const int thread = 0;
#endif //!MULTIPLE_HEAPS

    uint8_t* o = (uint8_t*)*ppObject;

    if (o == 0)
        return;

    HEAP_FROM_THREAD;

    gc_heap* hp = gc_heap::heap_of (o);

    if ((o < hp->background_saved_lowest_address) || (o >= hp->background_saved_highest_address))
    {
        return;
    }

#ifdef INTERIOR_POINTERS
    if (flags & GC_CALL_INTERIOR)
    {
        o = hp->find_object (o, hp->background_saved_lowest_address);
        if (o == 0)
            return;
    }
#endif //INTERIOR_POINTERS

#ifdef FEATURE_CONSERVATIVE_GC
    // For conservative GC, a value on stack may point to middle of a free object.
    // In this case, we don't need to promote the pointer.
    if (GCConfig::GetConservativeGC() && ((CObjectHeader*)o)->IsFree())
    {
        return;
    }
#endif //FEATURE_CONSERVATIVE_GC

#ifdef _DEBUG
    ((CObjectHeader*)o)->Validate();
#endif //_DEBUG

    dprintf (3, ("Concurrent Background Promote %Ix", (size_t)o));
    if (o && (size (o) > loh_size_threshold))
    {
        dprintf (3, ("Brc %Ix", (size_t)o));
    }

    if (hpt->c_mark_list_index >= hpt->c_mark_list_length)
    {
        hpt->background_grow_c_mark_list();
    }
    dprintf (3, ("pushing %08x into mark_list", (size_t)o));
    hpt->c_mark_list [hpt->c_mark_list_index++] = o;

    STRESS_LOG3(LF_GC|LF_GCROOTS, LL_INFO1000000, "    GCHeap::Background Promote: Promote GC Root *%p = %p MT = %pT", ppObject, o, o ? ((Object*) o)->GetGCSafeMethodTable() : NULL);
}

void gc_heap::mark_absorb_new_alloc()
{
    fix_allocation_contexts (FALSE);

    gen0_bricks_cleared = FALSE;

    clear_gen0_bricks();
}

BOOL gc_heap::prepare_bgc_thread(gc_heap* gh)
{
    BOOL success = FALSE;
    BOOL thread_created = FALSE;
    dprintf (2, ("Preparing gc thread"));
    gh->bgc_threads_timeout_cs.Enter();
    if (!(gh->bgc_thread_running))
    {
        dprintf (2, ("GC thread not running"));
        if ((gh->bgc_thread == 0) && create_bgc_thread(gh))
        {
            success = TRUE;
            thread_created = TRUE;
        }
    }
    else
    {
        dprintf (3, ("GC thread already running"));
        success = TRUE;
    }
    gh->bgc_threads_timeout_cs.Leave();

    if(thread_created)
        FIRE_EVENT(GCCreateConcurrentThread_V1);

    return success;
}

BOOL gc_heap::create_bgc_thread(gc_heap* gh)
{
    assert (background_gc_done_event.IsValid());

    //dprintf (2, ("Creating BGC thread"));

    gh->bgc_thread_running = GCToEEInterface::CreateThread(gh->bgc_thread_stub, gh, true, ".NET Background GC");
    return gh->bgc_thread_running;
}

BOOL gc_heap::create_bgc_threads_support (int number_of_heaps)
{
    BOOL ret = FALSE;
    dprintf (3, ("Creating concurrent GC thread for the first time"));
    if (!background_gc_done_event.CreateManualEventNoThrow(TRUE))
    {
        goto cleanup;
    }
    if (!bgc_threads_sync_event.CreateManualEventNoThrow(FALSE))
    {
        goto cleanup;
    }
    if (!ee_proceed_event.CreateAutoEventNoThrow(FALSE))
    {
        goto cleanup;
    }
    if (!bgc_start_event.CreateManualEventNoThrow(FALSE))
    {
        goto cleanup;
    }

#ifdef MULTIPLE_HEAPS
    bgc_t_join.init (number_of_heaps, join_flavor_bgc);
#else
    UNREFERENCED_PARAMETER(number_of_heaps);
#endif //MULTIPLE_HEAPS

    ret = TRUE;

cleanup:

    if (!ret)
    {
        if (background_gc_done_event.IsValid())
        {
            background_gc_done_event.CloseEvent();
        }
        if (bgc_threads_sync_event.IsValid())
        {
            bgc_threads_sync_event.CloseEvent();
        }
        if (ee_proceed_event.IsValid())
        {
            ee_proceed_event.CloseEvent();
        }
        if (bgc_start_event.IsValid())
        {
            bgc_start_event.CloseEvent();
        }
    }

    return ret;
}

BOOL gc_heap::create_bgc_thread_support()
{
    BOOL ret = FALSE;
    uint8_t** parr;

    if (!gc_lh_block_event.CreateManualEventNoThrow(FALSE))
    {
        goto cleanup;
    }

    //needs to have room for as many of the smallest objects as can fit on a page
    //(see the sizing note after this function)
    parr = new (nothrow) uint8_t*[1 + OS_PAGE_SIZE / MIN_OBJECT_SIZE];
    if (!parr)
    {
        goto cleanup;
    }

    make_c_mark_list (parr);

    ret = TRUE;

cleanup:

    if (!ret)
    {
        if (gc_lh_block_event.IsValid())
        {
            gc_lh_block_event.CloseEvent();
        }
    }

    return ret;
}

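// Returns the generation to condemn for an ephemeral allocation failure:
// (max_generation - 1) if the GC was triggered because we ran out of SOH budget;
// otherwise the highest ephemeral generation whose allocation budget is exhausted
// (checked consecutively starting at gen0) on any heap, or -1 if gen0 still has
// budget on every heap.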
int gc_heap::check_for_ephemeral_alloc()
{
    int gen = ((settings.reason == reason_oos_soh) ? (max_generation - 1) : -1);

    if (gen == -1)
    {
#ifdef MULTIPLE_HEAPS
        for (int heap_index = 0; heap_index < n_heaps; heap_index++)
#endif //MULTIPLE_HEAPS
        {
            for (int i = 0; i <= (max_generation - 1); i++)
            {
#ifdef MULTIPLE_HEAPS
                if (g_heaps[heap_index]->get_new_allocation (i) <= 0)
#else
                if (get_new_allocation (i) <= 0)
#endif //MULTIPLE_HEAPS
                {
                    gen = max (gen, i);
                }
                else
                    break;
            }
        }
    }

    return gen;
}

// Wait for gc to finish sequential part
void gc_heap::wait_to_proceed()
{
    assert (background_gc_done_event.IsValid());
    assert (bgc_start_event.IsValid());

    user_thread_wait(&ee_proceed_event, FALSE);
}

// Start a new concurrent gc
void gc_heap::start_c_gc()
{
    assert (background_gc_done_event.IsValid());
    assert (bgc_start_event.IsValid());

//Need to make sure that the gc thread is in the right place.
    background_gc_done_event.Wait(INFINITE, FALSE);
    background_gc_done_event.Reset();
    bgc_start_event.Set();
}

void gc_heap::do_background_gc()
{
    dprintf (2, ("starting a BGC"));
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
    {
        g_heaps[i]->init_background_gc();
    }
#else
    init_background_gc();
#endif //MULTIPLE_HEAPS

#ifdef BGC_SERVO_TUNING
    bgc_tuning::record_bgc_start();
#endif //BGC_SERVO_TUNING

    //start the background gc
    start_c_gc ();

    //wait until we get restarted by the BGC.
    wait_to_proceed();
}

void gc_heap::kill_gc_thread()
{
    //assert (settings.concurrent == FALSE);

    // We are doing a two-stage shutdown now.
    // In the first stage, we do minimum work, and call ExitProcess at the end.
    // In the second stage, we have the Loader lock and only one thread is
    // alive.  Hence we do not need to kill the GC thread.
    background_gc_done_event.CloseEvent();
    gc_lh_block_event.CloseEvent();
    bgc_start_event.CloseEvent();
    bgc_threads_timeout_cs.Destroy();
    bgc_thread = 0;
    recursive_gc_sync::shutdown();
}

void gc_heap::bgc_thread_function()
{
    assert (background_gc_done_event.IsValid());
    assert (bgc_start_event.IsValid());

    dprintf (3, ("gc_thread thread starting..."));

    BOOL do_exit = FALSE;

    bool cooperative_mode = true;
    bgc_thread_id.SetToCurrentThread();
    dprintf (1, ("bgc_thread_id is set to %x", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()));
    while (1)
    {
        // Wait for work to do...
        dprintf (3, ("bgc thread: waiting..."));

        cooperative_mode = enable_preemptive ();
        //current_thread->m_fPreemptiveGCDisabled = 0;

        uint32_t result = bgc_start_event.Wait(
#ifdef _DEBUG
#ifdef MULTIPLE_HEAPS
                                             INFINITE,
#else
                                             2000,
#endif //MULTIPLE_HEAPS
#else //_DEBUG
#ifdef MULTIPLE_HEAPS
                                             INFINITE,
#else
                                             20000,
#endif //MULTIPLE_HEAPS
#endif //_DEBUG
            FALSE);
        dprintf (2, ("gc thread: finished waiting"));

        // Not calling disable_preemptive here because we
        // can't wait for the GC to complete here - RestartEE will be called
        // when we've done the init work.

        if (result == WAIT_TIMEOUT)
        {
            // Should join the bgc threads and terminate all of them
            // at once.
            dprintf (1, ("GC thread timeout"));
            bgc_threads_timeout_cs.Enter();
            if (!keep_bgc_threads_p)
            {
                dprintf (2, ("GC thread exiting"));
                bgc_thread_running = FALSE;
                bgc_thread = 0;
                bgc_thread_id.Clear();
                do_exit = TRUE;
            }
            bgc_threads_timeout_cs.Leave();
            if (do_exit)
                break;
            else
            {
                dprintf (3, ("GC thread needed, not exiting"));
                continue;
            }
        }
        // if we signal the thread with no concurrent work to do -> exit
        if (!settings.concurrent)
        {
            dprintf (3, ("no concurrent GC needed, exiting"));
            break;
        }
        recursive_gc_sync::begin_background();
        dprintf (2, ("beginning of bgc: gen2 FL: %d, FO: %d, frag: %d",
            generation_free_list_space (generation_of (max_generation)),
            generation_free_obj_space (generation_of (max_generation)),
            dd_fragmentation (dynamic_data_of (max_generation))));

        gc1();

        current_bgc_state = bgc_not_in_process;

        enable_preemptive ();
#ifdef MULTIPLE_HEAPS
        bgc_t_join.join(this, gc_join_done);
        if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
        {
            enter_spin_lock (&gc_lock);
            dprintf (SPINLOCK_LOG, ("bgc Egc"));

            bgc_start_event.Reset();
            do_post_gc();
#ifdef MULTIPLE_HEAPS
            for (int gen = max_generation; gen <= (max_generation + 1); gen++)
            {
                size_t desired_per_heap = 0;
                size_t total_desired = 0;
                gc_heap* hp = 0;
                dynamic_data* dd;
                for (int i = 0; i < n_heaps; i++)
                {
                    hp = g_heaps[i];
                    dd = hp->dynamic_data_of (gen);
                    size_t temp_total_desired = total_desired + dd_desired_allocation (dd);
                    if (temp_total_desired < total_desired)
                    {
                        // we overflowed.
                        total_desired = (size_t)MAX_PTR;
                        break;
                    }
                    total_desired = temp_total_desired;
                }

                desired_per_heap = Align ((total_desired/n_heaps), get_alignment_constant (FALSE));

                for (int i = 0; i < n_heaps; i++)
                {
                    hp = gc_heap::g_heaps[i];
                    dd = hp->dynamic_data_of (gen);
                    dd_desired_allocation (dd) = desired_per_heap;
                    dd_gc_new_allocation (dd) = desired_per_heap;
                    dd_new_allocation (dd) = desired_per_heap;
                }
            }
            fire_pevents();
#endif //MULTIPLE_HEAPS

            c_write (settings.concurrent, FALSE);
            recursive_gc_sync::end_background();
            keep_bgc_threads_p = FALSE;
            background_gc_done_event.Set();

            dprintf (SPINLOCK_LOG, ("bgc Lgc"));
            leave_spin_lock (&gc_lock);
#ifdef MULTIPLE_HEAPS
            dprintf(1, ("End of BGC - starting all BGC threads"));
            bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
        }
        // We can't disable preemptive mode here because a GC might already have
        // started, decided to do a BGC, and be waiting for a BGC thread to restart
        // the VM. That GC would be waiting in wait_to_proceed while we wait for it
        // to restart the VM, so we would deadlock.
        //gc_heap::disable_preemptive (true);
    }

    FIRE_EVENT(GCTerminateConcurrentThread_V1);

    dprintf (3, ("bgc_thread thread exiting"));
    return;
}

#ifdef BGC_SERVO_TUNING
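// Stepping trigger: while use_stepping_trigger_p is set, trigger a BGC each time the
// memory load has grown by at least stepping_interval since the last stepping-triggered
// BGC (provided no other gen2 GC has happened in between). Stepping disables itself
// once the memory load gets close to the goal.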
bool gc_heap::bgc_tuning::stepping_trigger (uint32_t current_memory_load, size_t current_gen2_count)
{
    if (!bgc_tuning::enable_fl_tuning)
    {
        return false;
    }

    bool stepping_trigger_p = false;
    if (use_stepping_trigger_p)
    {
        dprintf (BGC_TUNING_LOG, ("current ml: %d, goal: %d",
            current_memory_load, memory_load_goal));
        // We don't step all the way up to the memory load goal because if we did, every
        // BGC could end up being triggered by stepping, and by the time we actually reach
        // the goal we would have no time to react - the next BGC could already be over the goal.
        if ((current_memory_load <= (memory_load_goal * 2 / 3)) ||
            ((memory_load_goal > current_memory_load) &&
             ((memory_load_goal - current_memory_load) > (stepping_interval * 3))))
        {
            int memory_load_delta = (int)current_memory_load - (int)last_stepping_mem_load;
            if (memory_load_delta >= (int)stepping_interval)
            {
                stepping_trigger_p = (current_gen2_count == last_stepping_bgc_count);
                if (stepping_trigger_p)
                {
                    current_gen2_count++;
                }

                dprintf (BGC_TUNING_LOG, ("current ml: %d - %d = %d (>= %d), gen2 count: %d->%d, stepping trigger: %s ",
                    current_memory_load, last_stepping_mem_load, memory_load_delta, stepping_interval,
                    last_stepping_bgc_count, current_gen2_count,
                    (stepping_trigger_p ? "yes" : "no")));
                last_stepping_mem_load = current_memory_load;
                last_stepping_bgc_count = current_gen2_count;
            }
        }
        else
        {
            use_stepping_trigger_p = false;
        }
    }

    return stepping_trigger_p;
}

// Note that I am doing this per heap but as we are in this calculation other
// heaps could increase their fl alloc. We are okay with that inaccuracy.
bool gc_heap::bgc_tuning::should_trigger_bgc_loh()
{
    if (fl_tuning_triggered)
    {
#ifdef MULTIPLE_HEAPS
        gc_heap* hp = g_heaps[0];
#else
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        if (!(recursive_gc_sync::background_running_p()))
        {
            int loh_gen_number = max_generation + 1;
            size_t current_alloc = get_total_servo_alloc (loh_gen_number);
            tuning_calculation* current_gen_calc = &gen_calc[loh_gen_number - max_generation];

            if (current_alloc < current_gen_calc->last_bgc_end_alloc)
            {
                dprintf (BGC_TUNING_LOG, ("BTL: current alloc: %Id, last alloc: %Id?",
                    current_alloc, current_gen_calc->last_bgc_end_alloc));
            }

            bool trigger_p = ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger);
            dprintf (2, ("BTL3: LOH a %Id, la: %Id(%Id), %Id",
                    current_alloc, current_gen_calc->last_bgc_end_alloc,
                    (current_alloc - current_gen_calc->last_bgc_end_alloc),
                    current_gen_calc->alloc_to_trigger));

            if (trigger_p)
            {
                dprintf (BGC_TUNING_LOG, ("BTL3: LOH detected (%Id - %Id) >= %Id, TRIGGER",
                        current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger));
                return true;
            }
        }
    }

    return false;
}

bool gc_heap::bgc_tuning::should_trigger_bgc()
{
    if (!bgc_tuning::enable_fl_tuning || recursive_gc_sync::background_running_p())
    {
        return false;
    }

    if (settings.reason == reason_bgc_tuning_loh)
    {
        // TODO: this should be an assert because if the reason was reason_bgc_tuning_loh,
        // we should have already decided to condemn max_generation; keeping it
        // for now in case we revert that for other reasons.
        bgc_tuning::next_bgc_p = true;
        dprintf (BGC_TUNING_LOG, ("BTL LOH triggered"));
        return true;
    }

    if (!bgc_tuning::next_bgc_p &&
        !fl_tuning_triggered &&
        (gc_heap::settings.entry_memory_load >= (memory_load_goal * 2 / 3)) &&
        (gc_heap::full_gc_counts[gc_type_background] >= 2))
    {
        next_bgc_p = true;

        gen_calc[0].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (max_generation);
        gen_calc[1].first_alloc_to_trigger = gc_heap::get_total_servo_alloc (max_generation + 1);
        dprintf (BGC_TUNING_LOG, ("BTL[GTC] mem high enough: %d(goal: %d), %Id BGCs done, g2a=%Id, g3a=%Id, trigger FL tuning!",
            gc_heap::settings.entry_memory_load, memory_load_goal,
            gc_heap::full_gc_counts[gc_type_background],
            gen_calc[0].first_alloc_to_trigger,
            gen_calc[1].first_alloc_to_trigger));
    }

    if (bgc_tuning::next_bgc_p)
    {
        dprintf (BGC_TUNING_LOG, ("BTL started FL tuning"));
        return true;
    }

    if (!fl_tuning_triggered)
    {
        return false;
    }

    // If tuning has started, we need to check whether we've exceeded the allocation that triggers a BGC.
    int index = 0;
    bgc_tuning::tuning_calculation* current_gen_calc = &bgc_tuning::gen_calc[index];

#ifdef MULTIPLE_HEAPS
    gc_heap* hp = g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    size_t current_gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1));
    size_t gen1_so_far = current_gen1_index - gen1_index_last_bgc_end;

    if (current_gen_calc->alloc_to_trigger > 0)
    {
        // We are specifically checking for gen2 here. LOH is covered by should_trigger_bgc_loh.
        size_t current_alloc = get_total_servo_alloc (max_generation);
        if ((current_alloc - current_gen_calc->last_bgc_end_alloc) >= current_gen_calc->alloc_to_trigger)
        {
            dprintf (BGC_TUNING_LOG, ("BTL2: SOH detected (%Id - %Id) >= %Id, TRIGGER",
                    current_alloc, current_gen_calc->last_bgc_end_alloc, current_gen_calc->alloc_to_trigger));
            settings.reason = reason_bgc_tuning_soh;
            return true;
        }
    }

    return false;
}

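// Returns true (ie, the gen2 FL allocation should be delayed) if we are in the BGC
// planning phase and the current gen2 free list on any heap has shrunk below 40% of
// what it was at the end of the last BGC.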
bool gc_heap::bgc_tuning::should_delay_alloc (int gen_number)
{
    if ((gen_number != max_generation) || !bgc_tuning::enable_fl_tuning)
        return false;

    if (current_c_gc_state == c_gc_state_planning)
    {
        int i = 0;
#ifdef MULTIPLE_HEAPS
        for (; i < gc_heap::n_heaps; i++)
        {
            gc_heap* hp = gc_heap::g_heaps[i];
            size_t current_fl_size = generation_free_list_space (hp->generation_of (max_generation));
            size_t last_bgc_fl_size = hp->bgc_maxgen_end_fl_size;
#else
        {
            size_t current_fl_size = generation_free_list_space (generation_of (max_generation));
            size_t last_bgc_fl_size = bgc_maxgen_end_fl_size;
#endif //MULTIPLE_HEAPS

            if (last_bgc_fl_size)
            {
                float current_flr = (float) current_fl_size / (float)last_bgc_fl_size;
                if (current_flr < 0.4)
                {
                    dprintf (BGC_TUNING_LOG, ("BTL%d h%d last fl %Id, curr fl %Id (%.3f) d1",
                            gen_number, i, last_bgc_fl_size, current_fl_size, current_flr));
                    return true;
                }
            }
        }
    }

    return false;
}

void gc_heap::bgc_tuning::update_bgc_start (int gen_number, size_t num_gen1s_since_end)
{
    int tuning_data_index = gen_number - max_generation;
    tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index];
    tuning_stats* current_gen_stats = &gen_stats[tuning_data_index];

    size_t total_generation_size = get_total_generation_size (gen_number);
    ptrdiff_t current_bgc_fl_size = get_total_generation_fl_size (gen_number);

    double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size;

    ptrdiff_t artificial_additional_fl = 0;

    if (fl_tuning_triggered)
    {
        artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0);
        total_generation_size += artificial_additional_fl;
        current_bgc_fl_size += artificial_additional_fl;
    }

    current_gen_calc->current_bgc_start_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size;

    size_t current_alloc = get_total_servo_alloc (gen_number);
    dprintf (BGC_TUNING_LOG, ("BTL%d: st a: %Id, la: %Id",
        gen_number, current_alloc, current_gen_stats->last_alloc));
    current_gen_stats->last_alloc_end_to_start = current_alloc - current_gen_stats->last_alloc;
    current_gen_stats->last_alloc = current_alloc;

    current_gen_calc->actual_alloc_to_trigger = current_alloc - current_gen_calc->last_bgc_end_alloc;

    dprintf (BGC_TUNING_LOG, ("BTL%d: st: %Id g1s (%Id->%Id/gen1) since end, flr: %.3f(afl: %Id, %.3f)",
             gen_number, actual_num_gen1s_to_trigger,
             current_gen_stats->last_alloc_end_to_start,
             (num_gen1s_since_end ? (current_gen_stats->last_alloc_end_to_start / num_gen1s_since_end) : 0),
             current_gen_calc->current_bgc_start_flr, artificial_additional_fl, physical_gen_flr));
}

void gc_heap::bgc_tuning::record_bgc_start()
{
    if (!bgc_tuning::enable_fl_tuning)
        return;

    size_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - start_time;

    // Note that a younger gen's collection count is always updated along with older gens' collections.
    // So to calculate the actual # of gen1s that occurred we really should take the # of gen2s into
    // account (and deduct it from gen1's collection count). But right now this is only used for stats.
    size_t current_gen1_index = get_current_gc_index (max_generation - 1);

    dprintf (BGC_TUNING_LOG, ("BTL: g2t[st][g1 %Id]: %0.3f minutes",
        current_gen1_index,
        (double)elapsed_time_so_far / (double)1000 / (double)60));

    actual_num_gen1s_to_trigger = current_gen1_index - gen1_index_last_bgc_end;
    gen1_index_last_bgc_start = current_gen1_index;

    update_bgc_start (max_generation, actual_num_gen1s_to_trigger);
    update_bgc_start ((max_generation + 1), actual_num_gen1s_to_trigger);
}

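// Returns the portion of num that falls within [lower, upper], scaled by percentage:
// 0 if num <= lower, and at most (upper - lower) * percentage. Used by
// calculate_gradual_d below to weight each successive step of a delta.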
double convert_range (double lower, double upper, double num, double percentage)
{
    double d = num - lower;
    if (d < 0.0)
        return 0.0;
    else
    {
        d = min ((upper - lower), d);
        return (d * percentage);
    }
}

double calculate_gradual_d (double delta_double, double step)
{
    bool changed_sign = false;
    if (delta_double < 0.0)
    {
        delta_double = -delta_double;
        changed_sign = true;
    }
    double res = 0;
    double current_lower_limit = 0;
    double current_ratio = 1.0;
    // Given a step, we gradually reduce the weight given to each successive
    // step of the delta.
    // We reduce the weight by *0.6 each time so there will be 3 iterations:
    // 1->0.6->0.36 (the next one would be 0.216 and terminate the loop).
    // This produces a result between 0 and step * (1 + 0.6 + 0.36) = step * 1.96
    // (0.098 when step is 0.05).
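    // For example, with step = 0.05 and delta_double = 0.12:
    //   step 1: min (0.05, 0.12) * 1.0  = 0.05
    //   step 2: min (0.05, 0.07) * 0.6  = 0.03
    //   step 3: min (0.05, 0.02) * 0.36 = 0.0072
    // for a total of 0.0872.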
    while (current_ratio > 0.22)
    {
        res += convert_range (current_lower_limit, (current_lower_limit + step), delta_double, current_ratio);
        current_lower_limit += step;
        current_ratio *= 0.6;
    }

    if (changed_sign)
        res = -res;

    return res;
}

void gc_heap::bgc_tuning::update_bgc_sweep_start (int gen_number, size_t num_gen1s_since_start)
{
    int tuning_data_index = gen_number - max_generation;
    tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index];
    tuning_stats* current_gen_stats = &gen_stats[tuning_data_index];

    size_t total_generation_size = 0;
    ptrdiff_t current_bgc_fl_size = 0;

    total_generation_size = get_total_generation_size (gen_number);
    current_bgc_fl_size = get_total_generation_fl_size (gen_number);

    double physical_gen_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size;

    ptrdiff_t artificial_additional_fl = 0;
    if (fl_tuning_triggered)
    {
        artificial_additional_fl = ((current_gen_calc->end_gen_size_goal > total_generation_size) ? (current_gen_calc->end_gen_size_goal - total_generation_size) : 0);
        total_generation_size += artificial_additional_fl;
        current_bgc_fl_size += artificial_additional_fl;
    }

    current_gen_calc->current_bgc_sweep_flr = (double)current_bgc_fl_size * 100.0 / (double)total_generation_size;

    size_t current_alloc = get_total_servo_alloc (gen_number);
    dprintf (BGC_TUNING_LOG, ("BTL%d: sw a: %Id, la: %Id",
        gen_number, current_alloc, current_gen_stats->last_alloc));
    current_gen_stats->last_alloc_start_to_sweep = current_alloc - current_gen_stats->last_alloc;
    // We are resetting this gen's alloc at sweep start.
    current_gen_stats->last_alloc = 0;

#ifdef SIMPLE_DPRINTF
    dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: %.3f%%->%.3f%% (%Id->%Id, %Id->%Id) (%Id:%Id-%Id/gen1) since start (afl: %Id, %.3f)",
             gen_number,
             current_gen_calc->last_bgc_flr, current_gen_calc->current_bgc_sweep_flr,
             current_gen_calc->last_bgc_size, total_generation_size,
             current_gen_stats->last_bgc_fl_size, current_bgc_fl_size,
             num_gen1s_since_start, current_gen_stats->last_alloc_start_to_sweep,
             (num_gen1s_since_start? (current_gen_stats->last_alloc_start_to_sweep / num_gen1s_since_start) : 0),
             artificial_additional_fl, physical_gen_flr));
#endif //SIMPLE_DPRINTF
}

void gc_heap::bgc_tuning::record_bgc_sweep_start()
{
    if (!bgc_tuning::enable_fl_tuning)
        return;

    size_t current_gen1_index = get_current_gc_index (max_generation - 1);
    size_t num_gen1s_since_start = current_gen1_index - gen1_index_last_bgc_start;
    gen1_index_last_bgc_sweep = current_gen1_index;

    size_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - start_time;
    dprintf (BGC_TUNING_LOG, ("BTL: g2t[sw][g1 %Id]: %0.3f minutes",
        current_gen1_index,
        (double)elapsed_time_so_far / (double)1000 / (double)60));

    update_bgc_sweep_start (max_generation, num_gen1s_since_start);
    update_bgc_sweep_start ((max_generation + 1), num_gen1s_since_start);
}

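// This is the inner FL (free list) servo loop. Roughly, it acts like a PID-style
// controller on the sweep free list ratio: the kp term is proportional to how far
// current_bgc_sweep_flr is above sweep_flr_goal, the ki term accumulates that error
// (with anti-windup clamping), and the kd/smooth/ff terms dampen the output. The
// output is alloc_to_trigger - how much FL allocation we allow before triggering
// the next BGC for this generation.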
void gc_heap::bgc_tuning::calculate_tuning (int gen_number, bool use_this_loop_p)
{
    BOOL use_kd_p = enable_kd;
    BOOL use_ki_p = enable_ki;
    BOOL use_smooth_p = enable_smooth;
    BOOL use_tbh_p = enable_tbh;
    BOOL use_ff_p = enable_ff;

    int tuning_data_index = gen_number - max_generation;
    tuning_calculation* current_gen_calc = &gen_calc[tuning_data_index];
    tuning_stats* current_gen_stats = &gen_stats[tuning_data_index];
    bgc_size_data* data = &current_bgc_end_data[tuning_data_index];

    size_t total_generation_size = data->gen_size;
    size_t current_bgc_fl = data->gen_fl_size;

    size_t current_bgc_surv_size = get_total_surv_size (gen_number);
    size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number);

    // This is usually 0 unless a GC happened where we joined at the end of sweep
    size_t current_alloc = get_total_servo_alloc (gen_number);
    //dprintf (BGC_TUNING_LOG, ("BTL%d: current fl alloc: %Id, last recorded alloc: %Id, last_bgc_end_alloc: %Id",
    dprintf (BGC_TUNING_LOG, ("BTL%d: en a: %Id, la: %Id, lbgca: %Id",
        gen_number, current_alloc, current_gen_stats->last_alloc, current_gen_calc->last_bgc_end_alloc));

    double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ?
                                    0 : ((double)current_bgc_surv_size * 100.0 / (double)current_bgc_begin_data_size);

    current_gen_stats->last_alloc_sweep_to_end = current_alloc - current_gen_stats->last_alloc;

    size_t gen1_index = get_current_gc_index (max_generation - 1);
    size_t gen2_index = get_current_gc_index (max_generation);

    size_t num_gen1s_since_sweep = gen1_index - gen1_index_last_bgc_sweep;
    size_t num_gen1s_bgc_end = gen1_index - gen1_index_last_bgc_end;

    size_t gen_end_size_goal = current_gen_calc->end_gen_size_goal;
    double gen_sweep_flr_goal = current_gen_calc->sweep_flr_goal;
    size_t last_gen_alloc_to_trigger = current_gen_calc->alloc_to_trigger;
    size_t gen_actual_alloc_to_trigger = current_gen_calc->actual_alloc_to_trigger;
    size_t last_gen_alloc_to_trigger_0 = current_gen_calc->alloc_to_trigger_0;

    double current_end_to_sweep_flr = current_gen_calc->last_bgc_flr - current_gen_calc->current_bgc_sweep_flr;
    bool current_sweep_above_p = (current_gen_calc->current_bgc_sweep_flr > gen_sweep_flr_goal);

#ifdef SIMPLE_DPRINTF
    dprintf (BGC_TUNING_LOG, ("BTL%d: sflr: c %.3f (%s), p %s, palloc: %Id, aalloc %Id(%s)",
        gen_number,
        current_gen_calc->current_bgc_sweep_flr,
        (current_sweep_above_p ? "above" : "below"),
        (current_gen_calc->last_sweep_above_p ? "above" : "below"),
        last_gen_alloc_to_trigger,
        current_gen_calc->actual_alloc_to_trigger,
        (use_this_loop_p ? "this" : "last")));

    dprintf (BGC_TUNING_LOG, ("BTL%d-en[g1: %Id, g2: %Id]: end fl: %Id (%Id: S-%Id, %.3f%%->%.3f%%)",
            gen_number,
            gen1_index, gen2_index, current_bgc_fl,
            total_generation_size, current_bgc_surv_size,
            current_gen_stats->last_bgc_surv_rate, current_bgc_surv_rate));

    dprintf (BGC_TUNING_LOG, ("BTLS%d sflr: %.3f, end-start: %Id(%Id), start-sweep: %Id(%Id), sweep-end: %Id(%Id)",
            gen_number,
            current_gen_calc->current_bgc_sweep_flr,
            (gen1_index_last_bgc_start - gen1_index_last_bgc_end), current_gen_stats->last_alloc_end_to_start,
            (gen1_index_last_bgc_sweep - gen1_index_last_bgc_start), current_gen_stats->last_alloc_start_to_sweep,
            num_gen1s_since_sweep, current_gen_stats->last_alloc_sweep_to_end));
#endif //SIMPLE_DPRINTF

    size_t saved_alloc_to_trigger = 0;

    // during our calculation alloc can be negative so use double here.
    double current_alloc_to_trigger = 0.0;

    if (!fl_tuning_triggered && use_tbh_p)
    {
        current_gen_calc->alloc_to_trigger_0 = current_gen_calc->actual_alloc_to_trigger;
        dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: not in FL tuning yet, setting alloc_to_trigger_0 to %Id",
                 gen_number,
                 gen1_index, current_gen_calc->alloc_to_trigger_0));
    }

    if (fl_tuning_triggered)
    {
        BOOL tuning_kd_finished_p = FALSE;

        // We shouldn't have an alloc_to_trigger that's > what's consumed before sweep happens.
        double max_alloc_to_trigger = ((double)current_bgc_fl * (100 - gen_sweep_flr_goal) / 100.0);
        double min_alloc_to_trigger = (double)current_bgc_fl * 0.05;

        {
            if (current_gen_calc->current_bgc_sweep_flr < 0.0)
            {
                dprintf (BGC_TUNING_LOG, ("BTL%d: sflr is %.3f!!! < 0, make it 0", gen_number, current_gen_calc->current_bgc_sweep_flr));
                current_gen_calc->current_bgc_sweep_flr = 0.0;
            }

            double adjusted_above_goal_kp = above_goal_kp;
            double above_goal_distance = current_gen_calc->current_bgc_sweep_flr - gen_sweep_flr_goal;
            if (use_ki_p)
            {
                if (current_gen_calc->above_goal_accu_error > max_alloc_to_trigger)
                {
                    dprintf (BGC_TUNING_LOG, ("g%d: ae TB! %.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, max_alloc_to_trigger));
                }
                else if (current_gen_calc->above_goal_accu_error < min_alloc_to_trigger)
                {
                    dprintf (BGC_TUNING_LOG, ("g%d: ae TS! %.1f->%.1f", gen_number, current_gen_calc->above_goal_accu_error, min_alloc_to_trigger));
                }

                current_gen_calc->above_goal_accu_error = min (max_alloc_to_trigger, current_gen_calc->above_goal_accu_error);
                current_gen_calc->above_goal_accu_error = max (min_alloc_to_trigger, current_gen_calc->above_goal_accu_error);

                double above_goal_ki_gain = above_goal_ki * above_goal_distance * current_bgc_fl;
                double temp_accu_error = current_gen_calc->above_goal_accu_error + above_goal_ki_gain;
                // anti-windup
                if ((temp_accu_error > min_alloc_to_trigger) &&
                    (temp_accu_error < max_alloc_to_trigger))
                {
                    current_gen_calc->above_goal_accu_error = temp_accu_error;
                }
                else
                {
                    //dprintf (BGC_TUNING_LOG, ("alloc accu err + %.1f=%.1f, exc",
                    dprintf (BGC_TUNING_LOG, ("g%d: aae + %.1f=%.1f, exc", gen_number,
                            above_goal_ki_gain,
                            temp_accu_error));
                }
            }

            // First we do the PI loop.
            {
                saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger;
                current_alloc_to_trigger = adjusted_above_goal_kp * above_goal_distance * current_bgc_fl;
                // la is last alloc_to_trigger, +%Id is the diff between la and the new alloc.
                // laa is the last actual alloc (gen_actual_alloc_to_trigger), +%Id is the diff between la and laa.
                dprintf (BGC_TUNING_LOG, ("BTL%d: sflr %.3f above * %.4f * %Id = %Id bytes in alloc, la: %Id(+%Id), laa: %Id(+%Id)",
                        gen_number,
                        (current_gen_calc->current_bgc_sweep_flr - (double)gen_sweep_flr_goal),
                        adjusted_above_goal_kp,
                        current_bgc_fl,
                        (size_t)current_alloc_to_trigger,
                        saved_alloc_to_trigger,
                        (size_t)(current_alloc_to_trigger - (double)saved_alloc_to_trigger),
                        gen_actual_alloc_to_trigger,
                        (gen_actual_alloc_to_trigger - saved_alloc_to_trigger)));

                if (use_ki_p)
                {
                    current_alloc_to_trigger += current_gen_calc->above_goal_accu_error;
                    dprintf (BGC_TUNING_LOG, ("BTL%d: +accu err %Id=%Id",
                            gen_number,
                            (size_t)(current_gen_calc->above_goal_accu_error),
                            (size_t)current_alloc_to_trigger));
                }
            }

            if (use_tbh_p)
            {
                if (current_gen_calc->last_sweep_above_p != current_sweep_above_p)
                {
                    size_t new_alloc_to_trigger_0 = (last_gen_alloc_to_trigger + last_gen_alloc_to_trigger_0) / 2;
                    dprintf (BGC_TUNING_LOG, ("BTL%d: tbh crossed SP, setting both to %Id", gen_number, new_alloc_to_trigger_0));
                    current_gen_calc->alloc_to_trigger_0 = new_alloc_to_trigger_0;
                    current_gen_calc->alloc_to_trigger = new_alloc_to_trigger_0;
                }

                tuning_kd_finished_p = TRUE;
            }
        }

        if (!tuning_kd_finished_p)
        {
            if (use_kd_p)
            {
                saved_alloc_to_trigger = last_gen_alloc_to_trigger;
                size_t alloc_delta = saved_alloc_to_trigger - gen_actual_alloc_to_trigger;
                double adjust_ratio = (double)alloc_delta / (double)gen_actual_alloc_to_trigger;
                double saved_adjust_ratio = adjust_ratio;
                if (enable_gradual_d)
                {
                    adjust_ratio = calculate_gradual_d (adjust_ratio, above_goal_kd);
                    dprintf (BGC_TUNING_LOG, ("BTL%d: gradual kd - reduced from %.3f to %.3f",
                            gen_number, saved_adjust_ratio, adjust_ratio));
                }
                else
                {
                    double kd = above_goal_kd;
                    double neg_kd = 0 - kd;
                    if (adjust_ratio > kd) adjust_ratio = kd;
                    if (adjust_ratio < neg_kd) adjust_ratio = neg_kd;
                    dprintf (BGC_TUNING_LOG, ("BTL%d: kd - reduced from %.3f to %.3f",
                            gen_number, saved_adjust_ratio, adjust_ratio));
                }

                current_gen_calc->alloc_to_trigger = (size_t)((double)gen_actual_alloc_to_trigger * (1 + adjust_ratio));

                dprintf (BGC_TUNING_LOG, ("BTL%d: kd %.3f, reduced it to %.3f * %Id, adjust %Id->%Id",
                        gen_number, saved_adjust_ratio,
                        adjust_ratio, gen_actual_alloc_to_trigger,
                        saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger));
            }

            if (use_smooth_p && use_this_loop_p)
            {
                saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger;
                size_t gen_smoothed_alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger;
                double current_num_gen1s_smooth_factor = (num_gen1s_smooth_factor > (double)num_bgcs_since_tuning_trigger) ?
                                                        (double)num_bgcs_since_tuning_trigger : num_gen1s_smooth_factor;
                current_gen_calc->smoothed_alloc_to_trigger = (size_t)((double)saved_alloc_to_trigger / current_num_gen1s_smooth_factor +
                    ((double)gen_smoothed_alloc_to_trigger / current_num_gen1s_smooth_factor) * (current_num_gen1s_smooth_factor - 1.0));

                dprintf (BGC_TUNING_LOG, ("BTL%d: smoothed %Id / %.3f + %Id / %.3f * %.3f adjust %Id->%Id",
                    gen_number, saved_alloc_to_trigger, current_num_gen1s_smooth_factor,
                    gen_smoothed_alloc_to_trigger, current_num_gen1s_smooth_factor,
                    (current_num_gen1s_smooth_factor - 1.0),
                    saved_alloc_to_trigger, current_gen_calc->smoothed_alloc_to_trigger));
                current_gen_calc->alloc_to_trigger = current_gen_calc->smoothed_alloc_to_trigger;
            }
        }

        if (use_ff_p)
        {
            double next_end_to_sweep_flr = data->gen_flr - gen_sweep_flr_goal;

            if (next_end_to_sweep_flr > 0.0)
            {
                saved_alloc_to_trigger = current_gen_calc->alloc_to_trigger;
                double ff_ratio = next_end_to_sweep_flr / current_end_to_sweep_flr - 1;

                if (use_this_loop_p)
                {
                    // If we adjust down we want ff to be bigger, so the alloc will be even smaller;
                    // if we adjust up we want ff to be smaller, so the alloc will also be smaller.
                    // The idea is that we want to be slower to increase than to decrease.
                    double ff_step = above_goal_ff * 0.5;
                    double adjusted_above_goal_ff = above_goal_ff;
                    if (ff_ratio > 0)
                        adjusted_above_goal_ff -= ff_step;
                    else
                        adjusted_above_goal_ff += ff_step;

                    double adjusted_ff_ratio = ff_ratio * adjusted_above_goal_ff;
                    current_gen_calc->alloc_to_trigger = saved_alloc_to_trigger + (size_t)((double)saved_alloc_to_trigger * adjusted_ff_ratio);
                    dprintf (BGC_TUNING_LOG, ("BTL%d: ff (%.3f / %.3f - 1) * %.3f = %.3f adjust %Id->%Id",
                        gen_number, next_end_to_sweep_flr, current_end_to_sweep_flr, adjusted_above_goal_ff, adjusted_ff_ratio,
                        saved_alloc_to_trigger, current_gen_calc->alloc_to_trigger));
                }
            }
        }

        if (use_this_loop_p)
        {
            // apply low/high caps.
            if (current_alloc_to_trigger > max_alloc_to_trigger)
            {
                dprintf (BGC_TUNING_LOG, ("BTL%d: TB! %.1f -> %.1f",
                    gen_number, current_alloc_to_trigger, max_alloc_to_trigger));
                current_alloc_to_trigger = max_alloc_to_trigger;
            }

            if (current_alloc_to_trigger < min_alloc_to_trigger)
            {
                dprintf (BGC_TUNING_LOG, ("BTL%d: TS! %Id -> %Id",
                        gen_number, (ptrdiff_t)current_alloc_to_trigger, (size_t)min_alloc_to_trigger));
                current_alloc_to_trigger = min_alloc_to_trigger;
            }

            current_gen_calc->alloc_to_trigger = (size_t)current_alloc_to_trigger;
        }
        else
        {
            // we can't do the above comparison - we could be in the situation where
            // we haven't done any alloc.
            dprintf (BGC_TUNING_LOG, ("BTL%d: ag, revert %Id->%Id",
                gen_number, current_gen_calc->alloc_to_trigger, last_gen_alloc_to_trigger));
            current_gen_calc->alloc_to_trigger = last_gen_alloc_to_trigger;
        }
    }

    // This is only executed once to get the tuning started.
    if (next_bgc_p)
    {
        size_t first_alloc = (size_t)((double)current_gen_calc->first_alloc_to_trigger * 0.75);
        // The initial conditions can be quite erratic so check to see if the first alloc we set was reasonable - take 5% of the FL
        size_t min_first_alloc = current_bgc_fl / 20;

        current_gen_calc->alloc_to_trigger = max (first_alloc, min_first_alloc);

        dprintf (BGC_TUNING_LOG, ("BTL%d[g1: %Id]: BGC end, trigger FL, set gen%d alloc to max (0.75 of first: %Id, 5%% fl: %Id), actual alloc: %Id",
            gen_number, gen1_index, gen_number,
            first_alloc, min_first_alloc,
            current_gen_calc->actual_alloc_to_trigger));
    }

    dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id",
                              gen_number,
                              total_generation_size,
                              current_gen_calc->current_bgc_start_flr,
                              current_gen_calc->current_bgc_sweep_flr,
                              current_bgc_end_data[tuning_data_index].gen_flr,
                              current_gen_stats->last_gen_increase_flr,
                              current_bgc_surv_rate,
                              actual_num_gen1s_to_trigger,
                              num_gen1s_bgc_end,
                              gen_actual_alloc_to_trigger,
                              current_gen_calc->alloc_to_trigger));

    gen1_index_last_bgc_end = gen1_index;

    current_gen_calc->last_bgc_size = total_generation_size;
    current_gen_calc->last_bgc_flr = current_bgc_end_data[tuning_data_index].gen_flr;
    current_gen_calc->last_sweep_above_p = current_sweep_above_p;
    current_gen_calc->last_bgc_end_alloc = current_alloc;

    current_gen_stats->last_bgc_physical_size = data->gen_physical_size;
    current_gen_stats->last_alloc_end_to_start = 0;
    current_gen_stats->last_alloc_start_to_sweep = 0;
    current_gen_stats->last_alloc_sweep_to_end = 0;
    current_gen_stats->last_alloc = current_alloc;
    current_gen_stats->last_bgc_fl_size = current_bgc_end_data[tuning_data_index].gen_fl_size;
    current_gen_stats->last_bgc_surv_rate = current_bgc_surv_rate;
    current_gen_stats->last_gen_increase_flr = 0;
}

// Note that in this method, for the generation whose loop is not used (!use_this_loop_p),
// we adjust its sweep_flr accordingly so the inner loop does not need to know about this.
void gc_heap::bgc_tuning::init_bgc_end_data (int gen_number, bool use_this_loop_p)
{
    int index = gen_number - max_generation;
    bgc_size_data* data = &current_bgc_end_data[index];

    size_t physical_size = get_total_generation_size (gen_number);
    ptrdiff_t physical_fl_size = get_total_generation_fl_size (gen_number);
    data->gen_actual_phys_fl_size = physical_fl_size;

    if (fl_tuning_triggered && !use_this_loop_p)
    {
        tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation];

        if (current_gen_calc->actual_alloc_to_trigger > current_gen_calc->alloc_to_trigger)
        {
            dprintf (BGC_TUNING_LOG, ("BTL%d: gen alloc also exceeded %Id (la: %Id), no action",
                gen_number, current_gen_calc->actual_alloc_to_trigger, current_gen_calc->alloc_to_trigger));
        }
        else
        {
            // We deduct the missing portion of the alloc from the FL, simulating that we consumed it.
            size_t remaining_alloc = current_gen_calc->alloc_to_trigger -
                                     current_gen_calc->actual_alloc_to_trigger;

            // Now re-calc current_bgc_sweep_flr.
            // TODO: note that this assumes the physical size at sweep was <= end_gen_size_goal, which
            // may not have been the case.
            size_t gen_size = current_gen_calc->end_gen_size_goal;
            double sweep_flr = current_gen_calc->current_bgc_sweep_flr;
            size_t sweep_fl_size = (size_t)((double)gen_size * sweep_flr / 100.0);

            if (sweep_fl_size < remaining_alloc)
            {
                dprintf (BGC_TUNING_LOG, ("BTL%d: sweep fl %Id < remain alloc %Id", gen_number, sweep_fl_size, remaining_alloc));
                // TODO: this means we didn't have enough fl to accommodate the
                // remaining alloc, which is suspicious. Setting remaining_alloc to
                // something slightly smaller is only so that we can continue with
                // our calculation; this is something we should look into.
                remaining_alloc = sweep_fl_size - (10 * 1024);
            }

            size_t new_sweep_fl_size = sweep_fl_size - remaining_alloc;
            ptrdiff_t signed_new_sweep_fl_size = sweep_fl_size - remaining_alloc;

            double new_current_bgc_sweep_flr = (double)new_sweep_fl_size * 100.0 / (double)gen_size;
            double signed_new_current_bgc_sweep_flr = (double)signed_new_sweep_fl_size * 100.0 / (double)gen_size;

            dprintf (BGC_TUNING_LOG, ("BTL%d: sg: %Id(%Id), sfl: %Id->%Id(%Id)(%.3f->%.3f(%.3f)), la: %Id, aa: %Id",
                gen_number, gen_size, physical_size, sweep_fl_size,
                new_sweep_fl_size, signed_new_sweep_fl_size,
                sweep_flr, new_current_bgc_sweep_flr, signed_new_current_bgc_sweep_flr,
                current_gen_calc->alloc_to_trigger, current_gen_calc->actual_alloc_to_trigger));

            current_gen_calc->actual_alloc_to_trigger = current_gen_calc->alloc_to_trigger;
            current_gen_calc->current_bgc_sweep_flr = new_current_bgc_sweep_flr;

            // TODO: NOTE this duplicates the calculation in calculate_tuning, except we don't multiply by 100.0 here.
            size_t current_bgc_surv_size = get_total_surv_size (gen_number);
            size_t current_bgc_begin_data_size = get_total_begin_data_size (gen_number);
            double current_bgc_surv_rate = (current_bgc_begin_data_size == 0) ?
                                            0 : ((double)current_bgc_surv_size / (double)current_bgc_begin_data_size);

            size_t remaining_alloc_surv = (size_t)((double)remaining_alloc * current_bgc_surv_rate);
            physical_fl_size -= remaining_alloc_surv;
            dprintf (BGC_TUNING_LOG, ("BTL%d: asfl %Id-%Id=%Id, flr %.3f->%.3f, %.3f%% s, fl %Id-%Id->%Id",
                gen_number, sweep_fl_size, remaining_alloc, new_sweep_fl_size,
                sweep_flr, current_gen_calc->current_bgc_sweep_flr,
                (current_bgc_surv_rate * 100.0),
                (physical_fl_size + remaining_alloc_surv),
                remaining_alloc_surv, physical_fl_size));
        }
    }

    double physical_gen_flr = (double)physical_fl_size * 100.0 / (double)physical_size;
    data->gen_physical_size = physical_size;
    data->gen_physical_fl_size = physical_fl_size;
    data->gen_physical_flr = physical_gen_flr;
}

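// Computes the "virtual" end-of-BGC size and FL for this generation: the physical
// FL plus the difference between the size goal the ML loop gave us (end_gen_size_goal)
// and the actual physical size. The FL servo loop then works against this virtual FL.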
void gc_heap::bgc_tuning::calc_end_bgc_fl (int gen_number)
{
    int index = gen_number - max_generation;
    bgc_size_data* data = &current_bgc_end_data[index];

    tuning_calculation* current_gen_calc = &gen_calc[gen_number - max_generation];

    size_t virtual_size = current_gen_calc->end_gen_size_goal;
    size_t physical_size = data->gen_physical_size;
    ptrdiff_t physical_fl_size = data->gen_physical_fl_size;
    ptrdiff_t virtual_fl_size = (ptrdiff_t)virtual_size - (ptrdiff_t)physical_size;
    ptrdiff_t end_gen_fl_size = physical_fl_size + virtual_fl_size;

    if (end_gen_fl_size < 0)
    {
        end_gen_fl_size = 0;
    }

    data->gen_size = virtual_size;
    data->gen_fl_size = end_gen_fl_size;
    data->gen_flr = (double)(data->gen_fl_size) * 100.0 / (double)(data->gen_size);

    dprintf (BGC_TUNING_LOG, ("BTL%d: vfl: %Id, size %Id->%Id, fl %Id->%Id, flr %.3f->%.3f",
        gen_number, virtual_fl_size,
        data->gen_physical_size, data->gen_size,
        data->gen_physical_fl_size, data->gen_fl_size,
        data->gen_physical_flr, data->gen_flr));
}

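// This is the core of the outer ML (memory load) servo loop: a PI calculation on the
// error between the currently available physical memory and the available memory goal.
// The kp term plus the accumulated ki term (with anti-windup) give the total "virtual"
// FL size, which is clamped to [0, max_output].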
// reduce_p is for NGC2s (non-concurrent gen2 GCs): we want to reduce the accumulated ki term so we don't overshoot.
double gc_heap::bgc_tuning::calculate_ml_tuning (uint64_t current_available_physical, bool reduce_p, ptrdiff_t* _vfl_from_kp, ptrdiff_t* _vfl_from_ki)
{
    ptrdiff_t error = (ptrdiff_t)(current_available_physical - available_memory_goal);

    // This is questionable since gen0/1 and other processes are consuming memory too.
    size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size;
    size_t gen3_physical_size = current_bgc_end_data[1].gen_physical_size;

    double max_output = (double)(total_physical_mem - available_memory_goal -
                                 gen2_physical_size - gen3_physical_size);

    double error_ratio = (double)error / (double)total_physical_mem;

    // do we want this to contribute to the integral term?
    bool include_in_i_p = ((error_ratio > 0.005) || (error_ratio < -0.005));

    dprintf (BGC_TUNING_LOG, ("total phy %Id, mem goal: %Id, curr phy: %Id, g2 phy: %Id, g3 phy: %Id",
            (size_t)total_physical_mem, (size_t)available_memory_goal,
            (size_t)current_available_physical,
            gen2_physical_size, gen3_physical_size));
    dprintf (BGC_TUNING_LOG, ("BTL: Max output: %Id, ER %Id / %Id = %.3f, %s",
            (size_t)max_output,
            error, available_memory_goal, error_ratio,
            (include_in_i_p ? "inc" : "exc")));

    if (include_in_i_p)
    {
        double error_ki = ml_ki * (double)error;
        double temp_accu_error = accu_error + error_ki;
        // anti-windup
        if ((temp_accu_error > 0) && (temp_accu_error < max_output))
            accu_error = temp_accu_error;
        else
        {
            //dprintf (BGC_TUNING_LOG, ("ml accu err + %Id=%Id, exc",
            dprintf (BGC_TUNING_LOG, ("mae + %Id=%Id, exc",
                    (size_t)error_ki, (size_t)temp_accu_error));
        }
    }

    if (reduce_p)
    {
        double saved_accu_error = accu_error;
        accu_error = accu_error * 2.0 / 3.0;
        panic_activated_p = false;
        accu_error_panic = 0;
        dprintf (BGC_TUNING_LOG, ("BTL reduced accu ki %Id->%Id", (ptrdiff_t)saved_accu_error, (ptrdiff_t)accu_error));
    }

    if (panic_activated_p)
        accu_error_panic += (double)error;
    else
        accu_error_panic = 0.0;

    double vfl_from_kp = (double)error * ml_kp;
    double total_virtual_fl_size = vfl_from_kp + accu_error;
    // limit output
    if (total_virtual_fl_size < 0)
    {
        dprintf (BGC_TUNING_LOG, ("BTL vfl %Id < 0", (size_t)total_virtual_fl_size));
        total_virtual_fl_size = 0;
    }
    else if (total_virtual_fl_size > max_output)
    {
        dprintf (BGC_TUNING_LOG, ("BTL vfl %Id > max", (size_t)total_virtual_fl_size));
        total_virtual_fl_size = max_output;
    }

    *_vfl_from_kp = (ptrdiff_t)vfl_from_kp;
    *_vfl_from_ki = (ptrdiff_t)accu_error;
    return total_virtual_fl_size;
}

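// Splits the total virtual FL computed by the ML loop between gen2 and gen3 in
// proportion to their physical sizes, with a ratio correction that is nudged up or
// down depending on which generation's servo loop triggered this BGC, and sets each
// generation's end_gen_size_goal from the result.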
void gc_heap::bgc_tuning::set_total_gen_sizes (bool use_gen2_loop_p, bool use_gen3_loop_p)
{
    size_t gen2_physical_size = current_bgc_end_data[0].gen_physical_size;
    size_t gen3_physical_size = 0;
    ptrdiff_t gen3_virtual_fl_size = 0;
    gen3_physical_size = current_bgc_end_data[1].gen_physical_size;
    double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size);

    // We know how far we are from the memory load goal, assuming that the memory is only
    // used by gen2/3 (which is obviously not the case, but that's why we are not setting the
    // memory goal at 90+%). Assign the memory to them proportionally.
    //
    // We use the entry memory load info because it seems to be more closely correlated with
    // what the VMM reports for memory load.
    uint32_t current_memory_load = settings.entry_memory_load;
    uint64_t current_available_physical = settings.entry_available_physical_mem;

    panic_activated_p = (current_memory_load >= (memory_load_goal + memory_load_goal_slack));

    if (panic_activated_p)
    {
        dprintf (BGC_TUNING_LOG, ("BTL: exceeded slack %Id >= (%Id + %Id)",
            (size_t)current_memory_load, (size_t)memory_load_goal,
            (size_t)memory_load_goal_slack));
    }

    ptrdiff_t vfl_from_kp = 0;
    ptrdiff_t vfl_from_ki = 0;
    double total_virtual_fl_size = calculate_ml_tuning (current_available_physical, false, &vfl_from_kp, &vfl_from_ki);

    if (use_gen2_loop_p || use_gen3_loop_p)
    {
        if (use_gen2_loop_p)
        {
            gen2_ratio_correction += ratio_correction_step;
        }
        else
        {
            gen2_ratio_correction -= ratio_correction_step;
        }

        dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio %.3f%% + %d%% = %.3f%%",
            (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0)));

        gen2_ratio_correction = min (0.99, gen2_ratio_correction);
        gen2_ratio_correction = max (-0.99, gen2_ratio_correction);

        dprintf (BGC_TUNING_LOG, ("BTL: rc again: g2 ratio %.3f%% + %d%% = %.3f%%",
            (gen2_size_ratio * 100.0), (int)(gen2_ratio_correction * 100.0), ((gen2_size_ratio + gen2_ratio_correction) * 100.0)));

        gen2_size_ratio += gen2_ratio_correction;

        if (gen2_size_ratio <= 0.0)
        {
            gen2_size_ratio = 0.01;
            dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.01"));
        }

        if (gen2_size_ratio >= 1.0)
        {
            gen2_size_ratio = 0.99;
            dprintf (BGC_TUNING_LOG, ("BTL: rc: g2 ratio->0.99"));
        }
    }

    ptrdiff_t gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio);
    gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio));
    if (gen2_virtual_fl_size < 0)
    {
        ptrdiff_t saved_gen2_virtual_fl_size = gen2_virtual_fl_size;
        ptrdiff_t half_gen2_physical_size = (ptrdiff_t)((double)gen2_physical_size * 0.5);
        if (-gen2_virtual_fl_size > half_gen2_physical_size)
        {
            gen2_virtual_fl_size = -half_gen2_physical_size;
        }

        dprintf (BGC_TUNING_LOG, ("BTL2: n_vfl %Id(%Id)->%Id", saved_gen2_virtual_fl_size, half_gen2_physical_size, gen2_virtual_fl_size));
        gen2_virtual_fl_size = 0;
    }

    if (gen3_virtual_fl_size < 0)
    {
        ptrdiff_t saved_gen3_virtual_fl_size = gen3_virtual_fl_size;
        ptrdiff_t half_gen3_physical_size = (ptrdiff_t)((double)gen3_physical_size * 0.5);
        if (-gen3_virtual_fl_size > half_gen3_physical_size)
        {
            gen3_virtual_fl_size = -half_gen3_physical_size;
        }

        dprintf (BGC_TUNING_LOG, ("BTL3: n_vfl %Id(%Id)->%Id", saved_gen3_virtual_fl_size, half_gen3_physical_size, gen3_virtual_fl_size));
        gen3_virtual_fl_size = 0;
    }

    gen_calc[0].end_gen_size_goal = gen2_physical_size + gen2_virtual_fl_size;
    gen_calc[1].end_gen_size_goal = gen3_physical_size + gen3_virtual_fl_size;

    // We calculate the end info here because the ff term in the fl servo loop uses it.
    calc_end_bgc_fl (max_generation);
    calc_end_bgc_fl (max_generation + 1);

#ifdef SIMPLE_DPRINTF
    dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id, pi=%Id), vfl: %Id=%Id+%Id",
        current_memory_load, memory_load_goal,
        ((current_available_physical > available_memory_goal) ? "above" : "below"),
        current_available_physical, available_memory_goal,
        gen2_physical_size, gen2_virtual_fl_size, gen_calc[0].end_gen_size_goal,
        gen3_physical_size, gen3_virtual_fl_size, gen_calc[1].end_gen_size_goal,
        (ptrdiff_t)accu_error_panic,
        (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki));
#endif //SIMPLE_DPRINTF
}

bool gc_heap::bgc_tuning::should_trigger_ngc2()
{
    return panic_activated_p;
}

// This is our outer ml servo loop where we calculate the control for the inner fl servo loop.
void gc_heap::bgc_tuning::convert_to_fl (bool use_gen2_loop_p, bool use_gen3_loop_p)
{
    size_t current_bgc_count = full_gc_counts[gc_type_background];

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        hp->bgc_maxgen_end_fl_size = generation_free_list_space (hp->generation_of (max_generation));
    }
#else
    bgc_maxgen_end_fl_size = generation_free_list_space (generation_of (max_generation));
#endif //MULTIPLE_HEAPS

    init_bgc_end_data (max_generation, use_gen2_loop_p);
    init_bgc_end_data ((max_generation + 1), use_gen3_loop_p);
    set_total_gen_sizes (use_gen2_loop_p, use_gen3_loop_p);

    dprintf (BGC_TUNING_LOG, ("BTL: gen2 %Id, fl %Id(%.3f)->%Id; gen3 %Id, fl %Id(%.3f)->%Id, %Id BGCs",
        current_bgc_end_data[0].gen_size, current_bgc_end_data[0].gen_fl_size,
        current_bgc_end_data[0].gen_flr, gen_calc[0].end_gen_size_goal,
        current_bgc_end_data[1].gen_size, current_bgc_end_data[1].gen_fl_size,
        current_bgc_end_data[1].gen_flr, gen_calc[1].end_gen_size_goal,
        current_bgc_count));
}

void gc_heap::bgc_tuning::record_and_adjust_bgc_end()
{
    if (!bgc_tuning::enable_fl_tuning)
        return;

    size_t elapsed_time_so_far = GetHighPrecisionTimeStamp() - start_time;
    size_t current_gen1_index = get_current_gc_index (max_generation - 1);
    dprintf (BGC_TUNING_LOG, ("BTL: g2t[en][g1 %Id]: %0.3f minutes",
        current_gen1_index,
        (double)elapsed_time_so_far / (double)1000 / (double)60));

    if (fl_tuning_triggered)
    {
        num_bgcs_since_tuning_trigger++;
    }

    bool use_gen2_loop_p = (settings.reason == reason_bgc_tuning_soh);
    bool use_gen3_loop_p = (settings.reason == reason_bgc_tuning_loh);
    dprintf (BGC_TUNING_LOG, ("BTL: reason: %d, gen2 loop: %s; gen3 loop: %s, promoted %Id bytes",
        (((settings.reason != reason_bgc_tuning_soh) && (settings.reason != reason_bgc_tuning_loh)) ?
            saved_bgc_tuning_reason : settings.reason),
        (use_gen2_loop_p ? "yes" : "no"),
        (use_gen3_loop_p ? "yes" : "no"),
        get_total_bgc_promoted()));

    convert_to_fl (use_gen2_loop_p, use_gen3_loop_p);

    calculate_tuning (max_generation, true);

    if (total_loh_a_last_bgc > 0)
    {
        calculate_tuning ((max_generation + 1), true);
    }
    else
    {
        dprintf (BGC_TUNING_LOG, ("BTL: gen3 not allocated"));
    }

    if (next_bgc_p)
    {
        next_bgc_p = false;
        fl_tuning_triggered = true;
        dprintf (BGC_TUNING_LOG, ("BTL: FL tuning ENABLED!!!"));
    }

    saved_bgc_tuning_reason = -1;
}
#endif //BGC_SERVO_TUNING
#endif //BACKGROUND_GC

//Clear the cards [start_card, end_card[
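// Whole card words in the middle of the range are simply zeroed; the partial words at
// either end are masked (assuming lowbits (~0, n) keeps the n low-order bits set and
// highbits (~0, n) keeps the bits at or above position n, per their use here).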
void gc_heap::clear_cards (size_t start_card, size_t end_card)
{
    if (start_card < end_card)
    {
        size_t start_word = card_word (start_card);
        size_t end_word = card_word (end_card);
        if (start_word < end_word)
        {
            // Figure out the bit positions of the cards within their words
            unsigned bits = card_bit (start_card);
            card_table [start_word] &= lowbits (~0, bits);
            for (size_t i = start_word+1; i < end_word; i++)
                card_table [i] = 0;
            bits = card_bit (end_card);
            // Don't write beyond end_card (and possibly uncommitted card table space).
            if (bits != 0)
            {
                card_table [end_word] &= highbits (~0, bits);
            }
        }
        else
        {
            // If the start and end cards are in the same word, just clear the appropriate card
            // bits in that word.
            card_table [start_word] &= (lowbits (~0, card_bit (start_card)) |
                                        highbits (~0, card_bit (end_card)));
        }
#ifdef VERYSLOWDEBUG
        size_t  card = start_card;
        while (card < end_card)
        {
            assert (! (card_set_p (card)));
            card++;
        }
#endif //VERYSLOWDEBUG
        dprintf (3,("Cleared cards [%Ix:%Ix, %Ix:%Ix[",
                  start_card, (size_t)card_address (start_card),
                  end_card, (size_t)card_address (end_card)));
    }
}

void gc_heap::clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address)
{
    size_t   start_card = card_of (align_on_card (start_address));
    size_t   end_card = card_of (align_lower_card (end_address));
    clear_cards (start_card, end_card);
}

// Copy cards [src_card, ...[ to [dst_card, end_card[.
// This will set the same bit twice. Can be optimized.
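// nextp is passed as true when dest and src have different offsets within their cards
// (see copy_cards_for_addresses); in that case a destination card can correspond to
// parts of two source cards, so the bit from the following source card is OR'd in too.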
inline
void gc_heap::copy_cards (size_t dst_card,
                          size_t src_card,
                          size_t end_card,
                          BOOL nextp)
{
    // If the range is empty, this function is a no-op - with the subtlety that
    // either of the accesses card_table[srcwrd] or card_table[dstwrd] could be
    // outside the committed region.  To avoid the access, leave early.
    if (!(dst_card < end_card))
        return;

    unsigned int srcbit = card_bit (src_card);
    unsigned int dstbit = card_bit (dst_card);
    size_t srcwrd = card_word (src_card);
    size_t dstwrd = card_word (dst_card);
    unsigned int srctmp = card_table[srcwrd];
    unsigned int dsttmp = card_table[dstwrd];

    for (size_t card = dst_card; card < end_card; card++)
    {
        if (srctmp & (1 << srcbit))
            dsttmp |= 1 << dstbit;
        else
            dsttmp &= ~(1 << dstbit);
        if (!(++srcbit % 32))
        {
            srctmp = card_table[++srcwrd];
            srcbit = 0;
        }

        if (nextp)
        {
            if (srctmp & (1 << srcbit))
                dsttmp |= 1 << dstbit;
        }

        if (!(++dstbit % 32))
        {
            card_table[dstwrd] = dsttmp;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            if (dsttmp != 0)
            {
                card_bundle_set(cardw_card_bundle(dstwrd));
            }
#endif

            dstwrd++;
            dsttmp = card_table[dstwrd];
            dstbit = 0;
        }
    }

    card_table[dstwrd] = dsttmp;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    if (dsttmp != 0)
    {
        card_bundle_set(cardw_card_bundle(dstwrd));
    }
#endif
}
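
// A stand-alone sketch of why copy_cards takes nextp: when the destination and
// source have different offsets within their cards, one destination card
// overlaps two source cards, so the destination bit must be the OR of both
// source bits.  Assumes 32-bit card words; model_copy_card is illustrative
// only and is not compiled into the GC.
#if 0
static void model_copy_card (const uint32_t* src_words, uint32_t* dst_words,
                             size_t dst_card, size_t src_card, bool nextp)
{
    bool set = ((src_words[src_card / 32] >> (src_card % 32)) & 1) != 0;
    if (nextp)
    {
        // Unaligned copy: the tail of this destination card maps onto the
        // beginning of the next source card, so fold its bit in as well.
        size_t next_src = src_card + 1;
        set = set || (((src_words[next_src / 32] >> (next_src % 32)) & 1) != 0);
    }
    if (set)
        dst_words[dst_card / 32] |= (1u << (dst_card % 32));
    else
        dst_words[dst_card / 32] &= ~(1u << (dst_card % 32));
}
#endif // illustrative sketch only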

void gc_heap::copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
{
    ptrdiff_t relocation_distance = src - dest;
    size_t start_dest_card = card_of (align_on_card (dest));
    size_t end_dest_card = card_of (dest + len - 1);
    size_t dest_card = start_dest_card;
    size_t src_card = card_of (card_address (dest_card)+relocation_distance);
    dprintf (3,("Copying cards [%Ix:%Ix->%Ix:%Ix, ",
                 src_card, (size_t)src, dest_card, (size_t)dest));
    dprintf (3,(" %Ix->%Ix:%Ix[",
              (size_t)src+len, end_dest_card, (size_t)dest+len));

    dprintf (3, ("dest: %Ix, src: %Ix, len: %Ix, reloc: %Ix, align_on_card(dest) is %Ix",
        dest, src, len, relocation_distance, (align_on_card (dest))));

    dprintf (3, ("start_dest_card: %Ix (address: %Ix), end_dest_card: %Ix(addr: %Ix), card_of (dest): %Ix",
        start_dest_card, card_address (start_dest_card), end_dest_card, card_address (end_dest_card), card_of (dest)));

    //First card has two boundaries
    if (start_dest_card != card_of (dest))
    {
        if ((card_of (card_address (start_dest_card) + relocation_distance) <= card_of (src + len - 1))&&
            card_set_p (card_of (card_address (start_dest_card) + relocation_distance)))
        {
            dprintf (3, ("card_address (start_dest_card) + reloc is %Ix, card: %Ix(set), src+len-1: %Ix, card: %Ix",
                    (card_address (start_dest_card) + relocation_distance),
                    card_of (card_address (start_dest_card) + relocation_distance),
                    (src + len - 1),
                    card_of (src + len - 1)));

            dprintf (3, ("setting card: %Ix", card_of (dest)));
            set_card (card_of (dest));
        }
    }

    if (card_set_p (card_of (src)))
        set_card (card_of (dest));


    copy_cards (dest_card, src_card, end_dest_card,
                ((dest - align_lower_card (dest)) != (src - align_lower_card (src))));

    //Last card has two boundaries.
    if ((card_of (card_address (end_dest_card) + relocation_distance) >= card_of (src)) &&
        card_set_p (card_of (card_address (end_dest_card) + relocation_distance)))
    {
        dprintf (3, ("card_address (end_dest_card) + reloc is %Ix, card: %Ix(set), src: %Ix, card: %Ix",
                (card_address (end_dest_card) + relocation_distance),
                card_of (card_address (end_dest_card) + relocation_distance),
                src,
                card_of (src)));

        dprintf (3, ("setting card: %Ix", end_dest_card));
        set_card (end_dest_card);
    }

    if (card_set_p (card_of (src + len - 1)))
        set_card (end_dest_card);

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    card_bundles_set(cardw_card_bundle(card_word(card_of(dest))), cardw_card_bundle(align_cardw_on_bundle(card_word(end_dest_card))));
#endif
}

#ifdef BACKGROUND_GC
// this does not need the Interlocked version of mark_array_set_marked.
void gc_heap::copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len)
{
    dprintf (3, ("Copying mark_bits for addresses [%Ix->%Ix, %Ix->%Ix[",
                 (size_t)src, (size_t)dest,
                 (size_t)src+len, (size_t)dest+len));

    uint8_t* src_o = src;
    uint8_t* dest_o;
    uint8_t* src_end = src + len;
    int align_const = get_alignment_constant (TRUE);
    ptrdiff_t reloc = dest - src;

    while (src_o < src_end)
    {
        uint8_t*  next_o = src_o + Align (size (src_o), align_const);

        if (background_object_marked (src_o, TRUE))
        {
            dest_o = src_o + reloc;

            //if (background_object_marked (dest_o, FALSE))
            //{
            //    dprintf (3, ("*%Ix shouldn't have already been marked!", (size_t)(dest_o)));
            //    FATAL_GC_ERROR();
            //}

            background_mark (dest_o,
                             background_saved_lowest_address,
                             background_saved_highest_address);
            dprintf (3, ("bc*%Ix*bc, b*%Ix*b", (size_t)src_o, (size_t)(dest_o)));
        }

        src_o = next_o;
    }
}
#endif //BACKGROUND_GC

void gc_heap::fix_brick_to_highest (uint8_t* o, uint8_t* next_o)
{
    size_t new_current_brick = brick_of (o);
    set_brick (new_current_brick,
               (o - brick_address (new_current_brick)));
    size_t b = 1 + new_current_brick;
    size_t limit = brick_of (next_o);
    //dprintf(3,(" fixing brick %Ix to point to object %Ix, till %Ix(%Ix)",
    dprintf(3,("b:%Ix->%Ix-%Ix",
               new_current_brick, (size_t)o, (size_t)next_o));
    while (b < limit)
    {
        set_brick (b,(new_current_brick - b));
        b++;
    }
}
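
// A stand-alone sketch of the brick table encoding that fix_brick_to_highest
// and find_first_object rely on, as far as it is visible in this file: a
// positive entry e in brick b means an object header lives at
// brick_address(b) + e - 1, while a negative entry e means "step back -e
// bricks and look there".  The model_* names are illustrative only and are
// not compiled into the GC.
#if 0
static uint8_t* model_find_brick_object (const ptrdiff_t* brick_entries,
                                         uint8_t* const* brick_addresses,
                                         ptrdiff_t brick, ptrdiff_t min_brick)
{
    while (brick >= min_brick)
    {
        ptrdiff_t entry = brick_entries[brick];
        if (entry > 0)
        {
            // Positive entry: offset (+1) of an object header within this brick.
            return brick_addresses[brick] + entry - 1;
        }
        if (entry == 0)
        {
            // Not expected - the search loop in find_first_object asserts this never happens.
            return nullptr;
        }
        // Negative entry: chain backwards towards an earlier brick.
        brick += entry;
    }
    // Fell below the first brick of the range being searched.
    return nullptr;
}
#endif // illustrative sketch only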

// start cannot be >= heap_segment_allocated for the segment.
uint8_t* gc_heap::find_first_object (uint8_t* start, uint8_t* first_object)
{
    size_t brick = brick_of (start);
    uint8_t* o = 0;
    // If start falls in the same brick as first_object (or precedes it), we can
    // simply start the scan from first_object - no brick search is needed.
    if ((brick == brick_of (first_object) || (start <= first_object)))
    {
        o = first_object;
    }
    else
    {
        ptrdiff_t  min_brick = (ptrdiff_t)brick_of (first_object);
        ptrdiff_t  prev_brick = (ptrdiff_t)brick - 1;
        int         brick_entry = 0;
        while (1)
        {
            if (prev_brick < min_brick)
            {
                break;
            }
            if ((brick_entry = get_brick_entry(prev_brick)) >= 0)
            {
                break;
            }
            assert (! ((brick_entry == 0)));
            prev_brick = (brick_entry + prev_brick);

        }
        o = ((prev_brick < min_brick) ? first_object :
                      brick_address (prev_brick) + brick_entry - 1);
        assert (o <= start);
    }

    assert (Align (size (o)) >= Align (min_obj_size));
    uint8_t*  next_o = o + Align (size (o));
    size_t curr_cl = (size_t)next_o / brick_size;
    size_t min_cl = (size_t)first_object / brick_size;

    //dprintf (3,( "Looking for intersection with %Ix from %Ix", (size_t)start, (size_t)o));
#ifdef TRACE_GC
    unsigned int n_o = 1;
#endif //TRACE_GC

    uint8_t* next_b = min (align_lower_brick (next_o) + brick_size, start+1);

    while (next_o <= start)
    {
        do
        {
#ifdef TRACE_GC
            n_o++;
#endif //TRACE_GC
            o = next_o;
            assert (Align (size (o)) >= Align (min_obj_size));
            next_o = o + Align (size (o));
            Prefetch (next_o);
        }while (next_o < next_b);

        if (((size_t)next_o / brick_size) != curr_cl)
        {
            if (curr_cl >= min_cl)
            {
                fix_brick_to_highest (o, next_o);
            }
            curr_cl = (size_t) next_o / brick_size;
        }
        next_b = min (align_lower_brick (next_o) + brick_size, start+1);
    }

    size_t bo = brick_of (o);
    //dprintf (3, ("Looked at %Id objects, fixing brick [%Ix-[%Ix",
    dprintf (3, ("%Id o, [%Ix-[%Ix",
        n_o, bo, brick));
    if (bo < brick)
    {
        set_brick (bo, (o - brick_address(bo)));
        size_t b = 1 + bo;
        int x = -1;
        while (b < brick)
        {
            set_brick (b,x--);
            b++;
        }
    }

    return o;
}

#ifdef CARD_BUNDLE

// Find the first non-zero card word between cardw and cardw_end.
// The index of the word we find is returned in cardw.
BOOL gc_heap::find_card_dword (size_t& cardw, size_t cardw_end)
{
    dprintf (3, ("gc: %d, find_card_dword cardw: %Ix, cardw_end: %Ix",
                 dd_collection_count (dynamic_data_of (0)), cardw, cardw_end));

    if (card_bundles_enabled())
    {
        size_t cardb = cardw_card_bundle (cardw);
        size_t end_cardb = cardw_card_bundle (align_cardw_on_bundle (cardw_end));
        while (1)
        {
            // Find a non-zero bundle
            while ((cardb < end_cardb) && (card_bundle_set_p (cardb) == 0))
            {
                cardb++;
            }
            if (cardb == end_cardb)
                return FALSE;

            uint32_t* card_word = &card_table[max(card_bundle_cardw (cardb),cardw)];
            uint32_t* card_word_end = &card_table[min(card_bundle_cardw (cardb+1),cardw_end)];
            while ((card_word < card_word_end) && !(*card_word))
            {
                card_word++;
            }

            if (card_word != card_word_end)
            {
                cardw = (card_word - &card_table[0]);
                return TRUE;
            }
            else if ((cardw <= card_bundle_cardw (cardb)) &&
                     (card_word == &card_table [card_bundle_cardw (cardb+1)]))
            {
                // a whole bundle was explored and is empty
                dprintf  (3, ("gc: %d, find_card_dword clear bundle: %Ix cardw:[%Ix,%Ix[",
                        dd_collection_count (dynamic_data_of (0)),
                        cardb, card_bundle_cardw (cardb),
                        card_bundle_cardw (cardb+1)));
                card_bundle_clear (cardb);
            }

            cardb++;
        }
    }
    else
    {
        uint32_t* card_word = &card_table[cardw];
        uint32_t* card_word_end = &card_table [cardw_end];

        while (card_word < card_word_end)
        {
            if ((*card_word) != 0)
            {
                cardw = (card_word - &card_table [0]);
                return TRUE;
            }

            card_word++;
        }
        return FALSE;
    }
}

#endif //CARD_BUNDLE
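
// A stand-alone sketch of the two-level scan find_card_dword performs when
// card bundles are enabled: one bundle bit summarizes a fixed number of card
// words, so whole clear bundles can be skipped without touching the card
// table.  WORDS_PER_BUNDLE stands in for the real bundle granularity and the
// model_* names are illustrative only; this is not compiled into the GC and
// it omits the bundle-clearing the real code does for bundles found empty.
#if 0
static bool model_find_card_word (const uint32_t* bundle_bits, // 1 bit per bundle
                                  const uint32_t* card_words,
                                  size_t& cardw, size_t cardw_end,
                                  size_t WORDS_PER_BUNDLE)
{
    size_t bundle     = cardw / WORDS_PER_BUNDLE;
    size_t bundle_end = (cardw_end + WORDS_PER_BUNDLE - 1) / WORDS_PER_BUNDLE;

    while (bundle < bundle_end)
    {
        // Skip bundles whose summary bit is clear - none of their card words are set.
        if (!((bundle_bits[bundle / 32] >> (bundle % 32)) & 1))
        {
            bundle++;
            continue;
        }

        // Scan only the card words this bundle covers, clamped to [cardw, cardw_end[.
        size_t w     = bundle * WORDS_PER_BUNDLE;
        size_t w_end = (bundle + 1) * WORDS_PER_BUNDLE;
        if (w < cardw)
            w = cardw;
        if (w_end > cardw_end)
            w_end = cardw_end;

        for (; w < w_end; w++)
        {
            if (card_words[w] != 0)
            {
                cardw = w;
                return true;
            }
        }
        bundle++;
    }
    return false;
}
#endif // illustrative sketch only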

// Find cards that are set between two points in a card table.
// Parameters
//     card_table    : The card table.
//     card          : [in/out] As input, the card to start searching from.
//                              As output, the first card that's set.
//     card_word_end : The card word index (exclusive) at which to stop looking.
//     end_card      : [out] One past the last set card of the run, i.e. the
//                           cards in [card, end_card[ are all set.
BOOL gc_heap::find_card(uint32_t* card_table,
                        size_t&   card,
                        size_t    card_word_end,
                        size_t&   end_card)
{
    uint32_t* last_card_word;
    uint32_t card_word_value;
    uint32_t bit_position;

    if (card_word (card) >= card_word_end)
        return FALSE;

    // Find the first card which is set
    last_card_word = &card_table [card_word (card)];
    bit_position = card_bit (card);
    card_word_value = (*last_card_word) >> bit_position;
    if (!card_word_value)
    {
        bit_position = 0;
#ifdef CARD_BUNDLE
        // Using the card bundle, go through the remaining card words between here and
        // card_word_end until we find one that is non-zero.
        size_t lcw = card_word(card) + 1;
        if (gc_heap::find_card_dword (lcw, card_word_end) == FALSE)
        {
            return FALSE;
        }
        else
        {
            last_card_word = &card_table [lcw];
            card_word_value = *last_card_word;
        }

#else //CARD_BUNDLE
        // Go through the remaining card words between here and card_word_end until we find
        // one that is non-zero.
        do
        {
            ++last_card_word;
        } while ((last_card_word < &card_table [card_word_end]) && !(*last_card_word));
        if (last_card_word < &card_table [card_word_end])
        {
            card_word_value = *last_card_word;
        }
        else
        {
            // We failed to find any non-zero card words before we got to card_word_end
            return FALSE;
        }
#endif //CARD_BUNDLE
    }


    // Look for the lowest bit set
    if (card_word_value)
    {
        while (!(card_word_value & 1))
        {
            bit_position++;
            card_word_value = card_word_value / 2;
        }
    }

    // card is the card word index * card word width + the bit index within the card word
    card = (last_card_word - &card_table[0]) * card_word_width + bit_position;

    do
    {
        // Keep going until we get to an un-set card.
        bit_position++;
        card_word_value = card_word_value / 2;

        // If we reach the end of the card word and haven't hit a 0 yet, start going
        // card word by card word until we get to one that's not fully set (0xFFFF...)
        // or we reach card_word_end.
        if ((bit_position == card_word_width) && (last_card_word < &card_table [card_word_end-1]))
        {
            do
            {
                card_word_value = *(++last_card_word);
            } while ((last_card_word < &card_table [card_word_end-1]) &&
                     (card_word_value == ~0u /* (1 << card_word_width)-1 */));
            bit_position = 0;
        }
    } while (card_word_value & 1);

    end_card = (last_card_word - &card_table [0])* card_word_width + bit_position;

    //dprintf (3, ("find_card: [%Ix, %Ix[ set", card, end_card));
    dprintf (3, ("fc: [%Ix, %Ix[", card, end_card));
    return TRUE;
}
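
// A stand-alone sketch of the run extraction find_card performs: starting at
// 'card', locate the first set card and then the first clear card after it,
// so that [card, end_card[ is a run of set cards.  Assumes 32-bit card words;
// model_find_card_run is illustrative only, is not compiled into the GC, and
// ignores the card-bundle path taken by the real code.
#if 0
static bool model_find_card_run (const uint32_t* words, size_t word_count,
                                 size_t& card, size_t& end_card)
{
    size_t w = card / 32;
    unsigned bit = (unsigned)(card % 32);

    while (w < word_count)
    {
        uint32_t v = words[w] >> bit;
        if (v != 0)
        {
            // Find the first set bit at or after 'card'.
            while (!(v & 1))
            {
                v >>= 1;
                bit++;
            }
            card = w * 32 + bit;

            // Keep going until the first clear bit, crossing word boundaries
            // as long as the run is still alive at the end of a word.
            do
            {
                bit++;
                v >>= 1;
                if ((bit == 32) && ((w + 1) < word_count))
                {
                    v = words[++w];
                    bit = 0;
                }
            } while (v & 1);

            end_card = w * 32 + bit;
            return true;
        }
        w++;
        bit = 0;
    }
    return false;
}
// Example: with words[0] == 0x78 (bits 3..6 set) and card == 0 on entry, this
// yields card == 3 and end_card == 7.
#endif // illustrative sketch only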


// Because of heap expansion, computing the end is complicated.
uint8_t* compute_next_end (heap_segment* seg, uint8_t* low)
{
    if ((low >=  heap_segment_mem (seg)) &&
        (low < heap_segment_allocated (seg)))
        return low;
    else
        return heap_segment_allocated (seg);
}

uint8_t*
gc_heap::compute_next_boundary (uint8_t* low, int gen_number,
                                BOOL relocating)
{
    UNREFERENCED_PARAMETER(low);

    //when relocating, the fault line is the plan start of the younger
    //generation because the generation is promoted.
    if (relocating && (gen_number == (settings.condemned_generation + 1)))
    {
        generation* gen = generation_of (gen_number - 1);
        uint8_t* gen_alloc = generation_plan_allocation_start (gen);
        assert (gen_alloc);
        return gen_alloc;
    }
    else
    {
        assert (gen_number > settings.condemned_generation);
        return generation_allocation_start (generation_of (gen_number - 1 ));
    }

}

inline void
gc_heap::keep_card_live (uint8_t* o, size_t& n_gen,
                         size_t& cg_pointers_found)
{
    THREAD_FROM_HEAP;
    if ((gc_low <= o) && (gc_high > o))
    {
        n_gen++;
    }
#ifdef MULTIPLE_HEAPS
    else if (o)
    {
        gc_heap* hp = heap_of (o);
        if (hp != this)
        {
            if ((hp->gc_low <= o) &&
                (hp->gc_high > o))
            {
                n_gen++;
            }
        }
    }
#endif //MULTIPLE_HEAPS
    cg_pointers_found ++;
    dprintf (4, ("keep card live for %Ix", o));
}

inline void
gc_heap::mark_through_cards_helper (uint8_t** poo, size_t& n_gen,
                                    size_t& cg_pointers_found,
                                    card_fn fn, uint8_t* nhigh,
                                    uint8_t* next_boundary
                                    CARD_MARKING_STEALING_ARG(gc_heap* hpt))
{
#if defined(FEATURE_CARD_MARKING_STEALING) && defined(MULTIPLE_HEAPS)
    int thread = hpt->heap_number;
#else
    THREAD_FROM_HEAP;
#endif
    if ((gc_low <= *poo) && (gc_high > *poo))
    {
        n_gen++;
        call_fn(hpt,fn) (poo THREAD_NUMBER_ARG);
    }
#ifdef MULTIPLE_HEAPS
    else if (*poo)
    {
        gc_heap* hp = heap_of_gc (*poo);
        if (hp != this)
        {
            if ((hp->gc_low <= *poo) &&
                (hp->gc_high > *poo))
            {
                n_gen++;
                call_fn(hpt,fn) (poo THREAD_NUMBER_ARG);
            }
            if ((fn == &gc_heap::relocate_address) ||
                ((hp->ephemeral_low <= *poo) &&
                 (hp->ephemeral_high > *poo)))
            {
                cg_pointers_found++;
            }
        }
    }
#endif //MULTIPLE_HEAPS
    if ((next_boundary <= *poo) && (nhigh > *poo))
    {
        cg_pointers_found ++;
        dprintf (4, ("cg pointer %Ix found, %Id so far",
                     (size_t)*poo, cg_pointers_found ));

    }
}

BOOL gc_heap::card_transition (uint8_t* po, uint8_t* end, size_t card_word_end,
                               size_t& cg_pointers_found,
                               size_t& n_eph, size_t& n_card_set,
                               size_t& card, size_t& end_card,
                               BOOL& foundp, uint8_t*& start_address,
                               uint8_t*& limit, size_t& n_cards_cleared
                               CARD_MARKING_STEALING_ARGS(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t &card_word_end_out))
{
    dprintf (3, ("pointer %Ix past card %Ix", (size_t)po, (size_t)card));
    dprintf (3, ("ct: %Id cg", cg_pointers_found));
    BOOL passed_end_card_p = FALSE;
    foundp = FALSE;

    if (cg_pointers_found == 0)
    {
        //dprintf(3,(" Clearing cards [%Ix, %Ix[ ",
        dprintf(3,(" CC [%Ix, %Ix[ ",
                (size_t)card_address(card), (size_t)po));
        clear_cards (card, card_of(po));
        n_card_set -= (card_of (po) - card);
        n_cards_cleared += (card_of (po) - card);

    }
    n_eph +=cg_pointers_found;
    cg_pointers_found = 0;
    card = card_of (po);
    if (card >= end_card)
    {
        passed_end_card_p = TRUE;
        dprintf (3, ("card %Ix exceeding end_card %Ix",
                    (size_t)card, (size_t)end_card));
        foundp = find_card (card_table, card, card_word_end, end_card);
        if (foundp)
        {
            n_card_set+= end_card - card;
            start_address = card_address (card);
            dprintf (3, ("NewC: %Ix, start: %Ix, end: %Ix",
                        (size_t)card, (size_t)start_address,
                        (size_t)card_address (end_card)));
        }
        limit = min (end, card_address (end_card));

#ifdef FEATURE_CARD_MARKING_STEALING
        // the card bit @ end_card should not be set
        // if end_card is still shy of the limit set by card_word_end
        assert(!((card_word(end_card) < card_word_end) &&
            card_set_p(end_card)));
        if (!foundp)
        {
            card_word_end_out = 0;
            foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end_out);
        }
#else
        // the card bit @ end_card should not be set -
        // find_card is supposed to terminate only when it finds a 0 bit
        // or the end of the segment
        assert (!((limit < end) &&
                card_set_p (end_card)));
#endif
    }

    return passed_end_card_p;
}

#ifdef FEATURE_CARD_MARKING_STEALING
bool card_marking_enumerator::move_next(heap_segment* seg, uint8_t*& low, uint8_t*& high)
{
    if (segment == nullptr)
        return false;

    uint32_t chunk_index = old_chunk_index;
    old_chunk_index = INVALID_CHUNK_INDEX;
    if (chunk_index == INVALID_CHUNK_INDEX)
        chunk_index = Interlocked::Increment((volatile int32_t *)chunk_index_counter);

    while (true)
    {
        uint32_t chunk_index_within_seg = chunk_index - segment_start_chunk_index;

        uint8_t* start = heap_segment_mem(segment);
        uint8_t* end = compute_next_end(segment, gc_low);

        uint8_t* aligned_start = (uint8_t*)((size_t)start & ~(CARD_MARKING_STEALING_GRANULARITY - 1));
        size_t seg_size = end - aligned_start;
        uint32_t chunk_count_within_seg = (uint32_t)((seg_size + (CARD_MARKING_STEALING_GRANULARITY - 1)) / CARD_MARKING_STEALING_GRANULARITY);
        if (chunk_index_within_seg < chunk_count_within_seg)
        {
            if (seg == segment)
            {
                low = (chunk_index_within_seg == 0) ? start : (aligned_start + (size_t)chunk_index_within_seg * CARD_MARKING_STEALING_GRANULARITY);
                high = (chunk_index_within_seg + 1 == chunk_count_within_seg) ? end : (aligned_start + (size_t)(chunk_index_within_seg + 1) * CARD_MARKING_STEALING_GRANULARITY);
                chunk_high = high;
                return true;
            }
            else
            {
                // we found the correct segment, but it's not the segment our caller is in

                // our caller should still be in one of the previous segments
#ifdef _DEBUG
                for (heap_segment* cur_seg = seg; cur_seg != segment; cur_seg = heap_segment_next_in_range(cur_seg))
                {                    
                    assert(cur_seg);
                }
#endif //_DEBUG

                // keep the chunk index for later
                old_chunk_index = chunk_index;
                return false;
            }
        }

        segment = heap_segment_next_in_range(segment);
        if (segment == nullptr)
        {
            return false;
        }
        segment_start_chunk_index += chunk_count_within_seg;
    }
}
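
// A stand-alone sketch of how a chunk index maps to an address range within a
// segment in move_next above: chunks are laid out from the granularity-aligned
// start of the segment, and the first and last chunks are clipped to the
// segment's real bounds.  GRANULARITY stands in for
// CARD_MARKING_STEALING_GRANULARITY and is assumed to be a power of two;
// model_chunk_range is illustrative only and is not compiled into the GC.
#if 0
static bool model_chunk_range (uint8_t* seg_start, uint8_t* seg_end,
                               uint32_t chunk_index_within_seg, size_t GRANULARITY,
                               uint8_t*& low, uint8_t*& high)
{
    uint8_t* aligned_start = (uint8_t*)((size_t)seg_start & ~(GRANULARITY - 1));
    size_t seg_size = seg_end - aligned_start;
    uint32_t chunk_count = (uint32_t)((seg_size + (GRANULARITY - 1)) / GRANULARITY);

    if (chunk_index_within_seg >= chunk_count)
        return false;   // this chunk belongs to a later segment

    low = (chunk_index_within_seg == 0)
              ? seg_start
              : (aligned_start + (size_t)chunk_index_within_seg * GRANULARITY);
    high = ((chunk_index_within_seg + 1) == chunk_count)
              ? seg_end
              : (aligned_start + (size_t)(chunk_index_within_seg + 1) * GRANULARITY);
    return true;
}
#endif // illustrative sketch only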

bool gc_heap::find_next_chunk(card_marking_enumerator& card_mark_enumerator, heap_segment* seg, size_t& n_card_set,
    uint8_t*& start_address, uint8_t*& limit,
    size_t& card, size_t& end_card, size_t& card_word_end)
{
    while (true)
    {
        if (card_word_end != 0 && find_card(card_table, card, card_word_end, end_card))
        {
            assert(end_card <= card_word_end * card_word_width);
            n_card_set += end_card - card;
            start_address = card_address(card);
            dprintf(3, ("NewC: %Ix, start: %Ix, end: %Ix",
                (size_t)card, (size_t)start_address,
                (size_t)card_address(end_card)));
            limit = min(card_mark_enumerator.get_chunk_high(), card_address(end_card));
            dprintf (3, ("New run of cards on heap %d: [%Ix,%Ix[", heap_number, (size_t)start_address, (size_t)limit));
            return true;
        }
        // we have exhausted this chunk, get the next one
        uint8_t* chunk_low = nullptr;
        uint8_t* chunk_high = nullptr;
        if (!card_mark_enumerator.move_next(seg, chunk_low, chunk_high))
        {
            dprintf (3, ("No more chunks on heap %d\n", heap_number));
            return false;
        }
        card = card_of (chunk_low);
        card_word_end = (card_of(align_on_card_word(chunk_high)) / card_word_width);
        dprintf (3, ("Moved to next chunk on heap %d: [%Ix,%Ix[", heap_number, (size_t)chunk_low, (size_t)chunk_high));
    }
}
#endif // FEATURE_CARD_MARKING_STEALING

void gc_heap::mark_through_cards_for_segments (card_fn fn, BOOL relocating CARD_MARKING_STEALING_ARG(gc_heap* hpt))
{
#ifdef BACKGROUND_GC
    dprintf (3, ("current_sweep_pos is %Ix, saved_sweep_ephemeral_seg is %Ix(%Ix)",
                 current_sweep_pos, saved_sweep_ephemeral_seg, saved_sweep_ephemeral_start));

    heap_segment* soh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
    PREFIX_ASSUME(soh_seg != NULL);

    while (soh_seg)
    {
        dprintf (3, ("seg %Ix, bgc_alloc: %Ix, alloc: %Ix",
            soh_seg,
            heap_segment_background_allocated (soh_seg),
            heap_segment_allocated (soh_seg)));

        soh_seg = heap_segment_next_rw (soh_seg);
    }
#endif //BACKGROUND_GC

    uint8_t* low = gc_low;
    uint8_t* high = gc_high;
    size_t end_card = 0;

    generation*   oldest_gen        = generation_of (max_generation);
    int           curr_gen_number   = max_generation;
    uint8_t*      gen_boundary      = generation_allocation_start(generation_of(curr_gen_number - 1));
    uint8_t*      next_boundary     = compute_next_boundary(gc_low, curr_gen_number, relocating);

    heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));
    PREFIX_ASSUME(seg != NULL);

    uint8_t*      beg               = generation_allocation_start (oldest_gen);
    uint8_t*      end               = compute_next_end (seg, low);
    uint8_t*      last_object       = beg;

    size_t  cg_pointers_found = 0;

    size_t  card_word_end = (card_of (align_on_card_word (end)) / card_word_width);

    size_t        n_eph             = 0;
    size_t        n_gen             = 0;
    size_t        n_card_set        = 0;
    uint8_t*      nhigh             = (relocating ?
                                       heap_segment_plan_allocated (ephemeral_heap_segment) : high);

    BOOL          foundp            = FALSE;
    uint8_t*      start_address     = 0;
    uint8_t*      limit             = 0;
    size_t        card              = card_of (beg);
#ifdef BACKGROUND_GC
    BOOL consider_bgc_mark_p        = FALSE;
    BOOL check_current_sweep_p      = FALSE;
    BOOL check_saved_sweep_p        = FALSE;
    should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC

    dprintf(3, ("CMs: %Ix->%Ix", (size_t)beg, (size_t)end));
    size_t total_cards_cleared = 0;

#ifdef FEATURE_CARD_MARKING_STEALING
    card_marking_enumerator card_mark_enumerator (seg, low, (VOLATILE(uint32_t)*)&card_mark_chunk_index_soh);
    card_word_end = 0;
#endif // FEATURE_CARD_MARKING_STEALING

    while (1)
    {
        if (card_of(last_object) > card)
        {
            dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
            if (cg_pointers_found == 0)
            {
                uint8_t* last_object_processed = last_object;
#ifdef FEATURE_CARD_MARKING_STEALING
                last_object_processed = min(limit, last_object);
#endif // FEATURE_CARD_MARKING_STEALING
                dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed));
                clear_cards(card, card_of(last_object_processed));
                n_card_set -= (card_of(last_object_processed) - card);
                total_cards_cleared += (card_of(last_object_processed) - card);
            }

            n_eph += cg_pointers_found;
            cg_pointers_found = 0;
            card = card_of (last_object);
        }

        if (card >= end_card)
        {
#ifdef FEATURE_CARD_MARKING_STEALING
            // find another chunk with some cards set
            foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end);
#else // FEATURE_CARD_MARKING_STEALING
            foundp = find_card(card_table, card, card_word_end, end_card);
            if (foundp)
            {
                n_card_set += end_card - card;
                start_address = max (beg, card_address (card));
            }
            limit = min (end, card_address (end_card));
#endif // FEATURE_CARD_MARKING_STEALING
        }
        if (!foundp || (last_object >= end) || (card_address (card) >= end))
        {
            if (foundp && (cg_pointers_found == 0))
            {
                dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
                           (size_t)end));
                clear_cards (card, card_of (end));
                n_card_set -= (card_of (end) - card);
                total_cards_cleared += (card_of (end) - card);
            }
            n_eph += cg_pointers_found;
            cg_pointers_found = 0;
#ifdef FEATURE_CARD_MARKING_STEALING
            // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment
            card_mark_enumerator.exhaust_segment(seg);
#endif // FEATURE_CARD_MARKING_STEALING
            if ((seg = heap_segment_next_in_range (seg)) != 0)
            {
#ifdef BACKGROUND_GC
                should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC
                beg = heap_segment_mem (seg);
                end = compute_next_end (seg, low);
#ifdef FEATURE_CARD_MARKING_STEALING
                card_word_end = 0;
#else // FEATURE_CARD_MARKING_STEALING
                card_word_end = card_of (align_on_card_word (end)) / card_word_width;
#endif // FEATURE_CARD_MARKING_STEALING
                card = card_of (beg);
                last_object = beg;
                end_card = 0;
                continue;
            }
            else
            {
                break;
            }
        }

        assert (card_set_p (card));
        {
            uint8_t* o = last_object;

            o = find_first_object (start_address, last_object);
            // Never visit an object twice.
            assert (o >= last_object);

            //dprintf(3,("Considering card %Ix start object: %Ix, %Ix[ boundary: %Ix",
            dprintf(3, ("c: %Ix, o: %Ix, l: %Ix[ boundary: %Ix",
                   card, (size_t)o, (size_t)limit, (size_t)gen_boundary));

            while (o < limit)
            {
                assert (Align (size (o)) >= Align (min_obj_size));
                size_t s = size (o);

                // next_o is the next object in the heap walk
                uint8_t* next_o =  o + Align (s);

                // while cont_o is the object we should continue with at the end_object label
                uint8_t* cont_o = next_o;

                Prefetch (next_o);

                if ((o >= gen_boundary) &&
                    (seg == ephemeral_heap_segment))
                {
                    dprintf (3, ("switching gen boundary %Ix", (size_t)gen_boundary));
                    curr_gen_number--;
                    assert ((curr_gen_number > 0));
                    gen_boundary = generation_allocation_start
                        (generation_of (curr_gen_number - 1));
                    next_boundary = (compute_next_boundary
                                     (low, curr_gen_number, relocating));
                }

                dprintf (4, ("|%Ix|", (size_t)o));

                if (next_o < start_address)
                {
                    goto end_object;
                }

#ifdef BACKGROUND_GC
                if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
                {
                    goto end_object;
                }
#endif //BACKGROUND_GC

#ifdef COLLECTIBLE_CLASS
                if (is_collectible(o))
                {
                    BOOL passed_end_card_p = FALSE;

                    if (card_of (o) > card)
                    {
                        passed_end_card_p = card_transition (o, end, card_word_end,
                            cg_pointers_found,
                            n_eph, n_card_set,
                            card, end_card,
                            foundp, start_address,
                            limit, total_cards_cleared
                            CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end));
                    }

                    if ((!passed_end_card_p || foundp) && (card_of (o) == card))
                    {
                        // card is valid and it covers the head of the object
                        if (fn == &gc_heap::relocate_address)
                        {
                            keep_card_live (o, n_gen, cg_pointers_found);
                        }
                        else
                        {
                            uint8_t* class_obj = get_class_object (o);
                            mark_through_cards_helper (&class_obj, n_gen,
                                                       cg_pointers_found, fn,
                                                       nhigh, next_boundary CARD_MARKING_STEALING_ARG(hpt));
                        }
                    }

                    if (passed_end_card_p)
                    {
                        if (foundp && (card_address (card) < next_o))
                        {
                            goto go_through_refs;
                        }
                        else if (foundp && (start_address < limit))
                        {
                            cont_o = find_first_object (start_address, o);
                            goto end_object;
                        }
                        else
                            goto end_limit;
                    }
                }

go_through_refs:
#endif //COLLECTIBLE_CLASS

                if (contain_pointers (o))
                {
                    dprintf(3,("Going through %Ix start_address: %Ix", (size_t)o, (size_t)start_address));

                    {
                        dprintf (4, ("normal object path"));
                        go_through_object
                            (method_table(o), o, s, poo,
                             start_address, use_start, (o + s),
                             {
                                 dprintf (4, ("<%Ix>:%Ix", (size_t)poo, (size_t)*poo));
                                 if (card_of ((uint8_t*)poo) > card)
                                 {
                                     BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
                                            card_word_end,
                                            cg_pointers_found,
                                            n_eph, n_card_set,
                                            card, end_card,
                                            foundp, start_address,
                                            limit, total_cards_cleared
                                            CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end));

                                     if (passed_end_card_p)
                                     {
                                         if (foundp && (card_address (card) < next_o))
                                         {
                                             //new_start();
                                             {
                                                 if (ppstop <= (uint8_t**)start_address)
                                                     {break;}
                                                 else if (poo < (uint8_t**)start_address)
                                                     {poo = (uint8_t**)start_address;}
                                             }
                                         }
                                         else if (foundp && (start_address < limit))
                                         {
                                             cont_o = find_first_object (start_address, o);
                                             goto end_object;
                                         }
                                         else
                                             goto end_limit;
                                     }
                                 }

                                 mark_through_cards_helper (poo, n_gen,
                                                            cg_pointers_found, fn,
                                                            nhigh, next_boundary CARD_MARKING_STEALING_ARG(hpt));
                             }
                            );
                    }
                }

            end_object:
                if (((size_t)next_o / brick_size) != ((size_t) o / brick_size))
                {
                    if (brick_table [brick_of (o)] <0)
                        fix_brick_to_highest (o, next_o);
                }
                o = cont_o;
            }
        end_limit:
            last_object = o;
        }
    }
    // compute the efficiency ratio of the card table
    if (!relocating)
    {
        generation_skip_ratio = ((n_eph > 400)? (int)(((float)n_gen / (float)n_eph) * 100) : 100);
        dprintf (3, ("Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d",
            n_eph, n_gen , n_card_set, total_cards_cleared, generation_skip_ratio));
    }
    else
    {
        dprintf (3, ("R: Msoh: cross: %Id, useful: %Id, cards set: %Id, cards cleared: %Id, ratio: %d",
            n_gen, n_eph, n_card_set, total_cards_cleared, generation_skip_ratio));
    }
}
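
// A stand-alone restatement of the efficiency computation at the end of
// mark_through_cards_for_segments: n_eph counts pointers encountered in set
// cards that looked like cross-generation references, and n_gen counts how
// many of those actually pointed into the range being collected.  With 400 or
// fewer cross references the sample is treated as too small and the ratio
// stays at 100.  model_skip_ratio is illustrative only and is not compiled
// into the GC.
#if 0
static int model_skip_ratio (size_t n_eph, size_t n_gen)
{
    return (n_eph > 400) ? (int)(((float)n_gen / (float)n_eph) * 100) : 100;
}
// Example: 1000 cross-generation pointers of which 150 were useful gives a
// skip ratio of 15.
#endif // illustrative sketch only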

#ifdef SEG_REUSE_STATS
size_t gc_heap::dump_buckets (size_t* ordered_indices, int count, size_t* total_size)
{
    size_t total_items = 0;
    *total_size = 0;
    for (int i = 0; i < count; i++)
    {
        total_items += ordered_indices[i];
        *total_size += ordered_indices[i] << (MIN_INDEX_POWER2 + i);
        dprintf (SEG_REUSE_LOG_0, ("[%d]%4d 2^%2d", heap_number, ordered_indices[i], (MIN_INDEX_POWER2 + i)));
    }
    dprintf (SEG_REUSE_LOG_0, ("[%d]Total %d items, total size is 0x%Ix", heap_number, total_items, *total_size));
    return total_items;
}
#endif // SEG_REUSE_STATS

void gc_heap::count_plug (size_t last_plug_size, uint8_t*& last_plug)
{
    // detect pinned plugs
    if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
    {
        deque_pinned_plug();
        update_oldest_pinned_plug();
        dprintf (3, ("deque pin,now oldest pin is %Ix", pinned_plug (oldest_pin())));
    }
    else
    {
        size_t plug_size = last_plug_size + Align(min_obj_size);
        BOOL is_padded = FALSE;

#ifdef SHORT_PLUGS
        plug_size += Align (min_obj_size);
        is_padded = TRUE;
#endif //SHORT_PLUGS

#ifdef RESPECT_LARGE_ALIGNMENT
        plug_size += switch_alignment_size (is_padded);
#endif //RESPECT_LARGE_ALIGNMENT

        total_ephemeral_plugs += plug_size;
        size_t plug_size_power2 = round_up_power2 (plug_size);
        ordered_plug_indices[relative_index_power2_plug (plug_size_power2)]++;
        dprintf (SEG_REUSE_LOG_1, ("[%d]count_plug: adding 0x%Ix - %Id (2^%d) to ordered plug array",
            heap_number,
            last_plug,
            plug_size,
            (relative_index_power2_plug (plug_size_power2) + MIN_INDEX_POWER2)));
    }
}
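
// A stand-alone sketch of the power-of-2 bucketing count_plug feeds: each
// plug size is rounded up to a power of two and counted in the bucket for
// that power, with bucket 0 corresponding to 2^MIN_POWER2.  MIN_POWER2 stands
// in for MIN_INDEX_POWER2 and the model_* name is illustrative only; this is
// not compiled into the GC.
#if 0
static void model_count_plug (size_t* buckets, size_t bucket_count,
                              size_t plug_size, int MIN_POWER2)
{
    // Round plug_size up to the next power of two.
    size_t p2 = 1;
    int power = 0;
    while (p2 < plug_size)
    {
        p2 <<= 1;
        power++;
    }

    // Map the power to a bucket index, clamping to the available range.
    int index = power - MIN_POWER2;
    if (index < 0)
        index = 0;
    if ((size_t)index >= bucket_count)
        index = (int)bucket_count - 1;

    buckets[index]++;
}
// Example: with MIN_POWER2 == 5 (an arbitrary value for this example), a
// 100-byte plug rounds up to 128 == 2^7 and lands in bucket 2.
#endif // illustrative sketch only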

void gc_heap::count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug)
{
    assert ((tree != NULL));
    if (node_left_child (tree))
    {
        count_plugs_in_brick (tree + node_left_child (tree), last_plug);
    }

    if (last_plug != 0)
    {
        uint8_t*  plug = tree;
        size_t gap_size = node_gap_size (plug);
        uint8_t*   gap = (plug - gap_size);
        uint8_t*  last_plug_end = gap;
        size_t  last_plug_size = (last_plug_end - last_plug);
        dprintf (3, ("tree: %Ix, last plug: %Ix, gap size: %Ix, gap: %Ix, last plug size: %Ix",
            tree, last_plug, gap_size, gap, last_plug_size));

        if (tree == oldest_pinned_plug)
        {
            dprintf (3, ("tree %Ix is pinned, last plug is %Ix, size is %Ix",
                tree, last_plug, last_plug_size));
            mark* m = oldest_pin();
            if (m->has_pre_plug_info())
            {
                last_plug_size += sizeof (gap_reloc_pair);
                dprintf (3, ("pin %Ix has pre plug, adjusting plug size to %Ix", tree, last_plug_size));
            }
        }
        // Can't assert here - if it's a pinned plug it can be less.
        //assert (last_plug_size >= Align (min_obj_size));

        count_plug (last_plug_size, last_plug);
    }

    last_plug = tree;

    if (node_right_child (tree))
    {
        count_plugs_in_brick (tree + node_right_child (tree), last_plug);
    }
}

void gc_heap::build_ordered_plug_indices ()
{
    memset (ordered_plug_indices, 0, sizeof(ordered_plug_indices));
    memset (saved_ordered_plug_indices, 0, sizeof(saved_ordered_plug_indices));

    uint8_t*  start_address = generation_limit (max_generation);
    uint8_t* end_address = heap_segment_allocated (ephemeral_heap_segment);
    size_t  current_brick = brick_of (start_address);
    size_t  end_brick = brick_of (end_address - 1);
    uint8_t* last_plug = 0;

    //Look for the right pinned plug to start from.
    reset_pinned_queue_bos();
    while (!pinned_plug_que_empty_p())
    {
        mark* m = oldest_pin();
        if ((m->first >= start_address) && (m->first < end_address))
        {
            dprintf (3, ("found a pin %Ix between %Ix and %Ix", m->first, start_address, end_address));

            break;
        }
        else
            deque_pinned_plug();
    }

    update_oldest_pinned_plug();

    while (current_brick <= end_brick)
    {
        int brick_entry =  brick_table [ current_brick ];
        if (brick_entry >= 0)
        {
            count_plugs_in_brick (brick_address (current_brick) + brick_entry -1, last_plug);
        }

        current_brick++;
    }

    if (last_plug !=0)
    {
        count_plug (end_address - last_plug, last_plug);
    }

    // We need to make sure that after fitting all the existing plugs, we
    // have a big enough free space left to guarantee that the next allocation
    // will succeed.
    size_t extra_size = END_SPACE_AFTER_GC + Align (min_obj_size);
    total_ephemeral_plugs += extra_size;
    dprintf (SEG_REUSE_LOG_0, ("Making sure we can fit a large object after fitting all plugs"));
    ordered_plug_indices[relative_index_power2_plug (round_up_power2 (extra_size))]++;

    memcpy (saved_ordered_plug_indices, ordered_plug_indices, sizeof(ordered_plug_indices));

#ifdef SEG_REUSE_STATS
    dprintf (SEG_REUSE_LOG_0, ("Plugs:"));
    size_t total_plug_power2 = 0;
    dump_buckets (ordered_plug_indices, MAX_NUM_BUCKETS, &total_plug_power2);
    dprintf (SEG_REUSE_LOG_0, ("plugs: 0x%Ix (rounded up to 0x%Ix (%d%%))",
                total_ephemeral_plugs,
                total_plug_power2,
                (total_ephemeral_plugs ?
                    (total_plug_power2 * 100 / total_ephemeral_plugs) :
                    0)));
    dprintf (SEG_REUSE_LOG_0, ("-------------------"));
#endif // SEG_REUSE_STATS
}

void gc_heap::init_ordered_free_space_indices ()
{
    memset (ordered_free_space_indices, 0, sizeof(ordered_free_space_indices));
    memset (saved_ordered_free_space_indices, 0, sizeof(saved_ordered_free_space_indices));
}

void gc_heap::trim_free_spaces_indices ()
{
    trimmed_free_space_index = -1;
    size_t max_count = max_free_space_items - 1;
    size_t count = 0;
    int i = 0;
    for (i = (MAX_NUM_BUCKETS - 1); i >= 0; i--)
    {
        count += ordered_free_space_indices[i];

        if (count >= max_count)
        {
            break;
        }
    }

    ptrdiff_t extra_free_space_items = count - max_count;

    if (extra_free_space_items > 0)
    {
        ordered_free_space_indices[i] -= extra_free_space_items;
        free_space_items = max_count;
        trimmed_free_space_index = i;
    }
    else
    {
        free_space_items = count;
    }

    if (i == -1)
    {
        i = 0;
    }

    free_space_buckets = MAX_NUM_BUCKETS - i;

    for (--i; i >= 0; i--)
    {
        ordered_free_space_indices[i] = 0;
    }

    memcpy (saved_ordered_free_space_indices,
            ordered_free_space_indices,
            sizeof(ordered_free_space_indices));
}
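
// A stand-alone sketch of the trimming above: keep only the largest
// (max_items - 1) free spaces by walking the buckets from the largest power
// of two downwards, trimming the bucket where the limit is crossed and
// discarding every smaller bucket.  It ignores the free_space_items /
// free_space_buckets bookkeeping; model_trim_buckets is illustrative only and
// is not compiled into the GC.
#if 0
static int model_trim_buckets (size_t* buckets, int bucket_count, size_t max_items)
{
    size_t max_count = max_items - 1;
    size_t count = 0;
    int i;
    for (i = bucket_count - 1; i >= 0; i--)
    {
        count += buckets[i];
        if (count >= max_count)
            break;
    }

    int trimmed_index = -1;
    if (count > max_count)
    {
        // Drop the excess from the bucket where we crossed the limit.
        buckets[i] -= (count - max_count);
        trimmed_index = i;
    }

    if (i < 0)
        i = 0;

    // Everything in smaller buckets is discarded.
    for (int j = i - 1; j >= 0; j--)
        buckets[j] = 0;

    return trimmed_index;
}
// Example: buckets (small to large) {5, 4, 3, 2} with max_items == 8 keeps
// the 2 + 3 largest spaces, trims the next bucket from 4 down to 2 and zeroes
// the smallest bucket, leaving exactly 7 spaces.
#endif // illustrative sketch only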

// We fit as many plugs as we can and update the number of plugs left and the number
// of free spaces left.
BOOL gc_heap::can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index)
{
    assert (small_index <= big_index);
    assert (big_index < MAX_NUM_BUCKETS);

    size_t small_blocks = ordered_blocks[small_index];

    if (small_blocks == 0)
    {
        return TRUE;
    }

    size_t big_spaces = ordered_spaces[big_index];

    if (big_spaces == 0)
    {
        return FALSE;
    }

    dprintf (SEG_REUSE_LOG_1, ("[%d]Fitting %Id 2^%d plugs into %Id 2^%d free spaces",
        heap_number,
        small_blocks, (small_index + MIN_INDEX_POWER2),
        big_spaces, (big_index + MIN_INDEX_POWER2)));

    size_t big_to_small = big_spaces << (big_index - small_index);

    ptrdiff_t extra_small_spaces = big_to_small - small_blocks;
    dprintf (SEG_REUSE_LOG_1, ("[%d]%d 2^%d spaces can fit %d 2^%d blocks",
        heap_number,
        big_spaces, (big_index + MIN_INDEX_POWER2), big_to_small, (small_index + MIN_INDEX_POWER2)));
    BOOL can_fit = (extra_small_spaces >= 0);

    if (can_fit)
    {
        dprintf (SEG_REUSE_LOG_1, ("[%d]Can fit with %d 2^%d extras blocks",
            heap_number,
            extra_small_spaces, (small_index + MIN_INDEX_POWER2)));
    }

    int i = 0;

    dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d spaces to 0", heap_number, (big_index + MIN_INDEX_POWER2)));
    ordered_spaces[big_index] = 0;
    if (extra_small_spaces > 0)
    {
        dprintf (SEG_REUSE_LOG_1, ("[%d]Setting # of 2^%d blocks to 0", heap_number, (small_index + MIN_INDEX_POWER2)));
        ordered_blocks[small_index] = 0;
        for (i = small_index; i < big_index; i++)
        {
            if (extra_small_spaces & 1)
            {
                dprintf (SEG_REUSE_LOG_1, ("[%d]Increasing # of 2^%d spaces from %d to %d",
                    heap_number,
                    (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + 1)));
                ordered_spaces[i] += 1;
            }
            extra_small_spaces >>= 1;
        }

        dprintf (SEG_REUSE_LOG_1, ("[%d]Finally increasing # of 2^%d spaces from %d to %d",
            heap_number,
            (i + MIN_INDEX_POWER2), ordered_spaces[i], (ordered_spaces[i] + extra_small_spaces)));
        ordered_spaces[i] += extra_small_spaces;
    }
    else
    {
        dprintf (SEG_REUSE_LOG_1, ("[%d]Decreasing # of 2^%d blocks from %d to %d",
            heap_number,
            (small_index + MIN_INDEX_POWER2),
            ordered_blocks[small_index],
            (ordered_blocks[small_index] - big_to_small)));
        ordered_blocks[small_index] -= big_to_small;
    }

#ifdef SEG_REUSE_STATS
    size_t temp;
    dprintf (SEG_REUSE_LOG_1, ("[%d]Plugs became:", heap_number));
    dump_buckets (ordered_blocks, MAX_NUM_BUCKETS, &temp);

    dprintf (SEG_REUSE_LOG_1, ("[%d]Free spaces became:", heap_number));
    dump_buckets (ordered_spaces, MAX_NUM_BUCKETS, &temp);
#endif //SEG_REUSE_STATS

    return can_fit;
}
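
// A stand-alone sketch of the arithmetic above: a free space of size 2^big
// can be carved into 2^(big - small) blocks of size 2^small, and whatever is
// left after fitting the blocks is handed back to the free-space buckets
// according to the binary representation of the leftover count.  The indices
// here are absolute powers of two rather than offsets from MIN_INDEX_POWER2;
// model_fit is illustrative only and is not compiled into the GC.
#if 0
static bool model_fit (size_t* blocks, size_t* spaces, int small_index, int big_index)
{
    size_t small_blocks = blocks[small_index];
    size_t big_spaces = spaces[big_index];
    if (small_blocks == 0)
        return true;
    if (big_spaces == 0)
        return false;

    // How many 2^small sized slots the big spaces provide.
    size_t big_to_small = big_spaces << (big_index - small_index);

    // The big spaces are consumed either way.
    spaces[big_index] = 0;

    if (big_to_small < small_blocks)
    {
        // Not enough room: some blocks of this size remain to be fitted elsewhere.
        blocks[small_index] -= big_to_small;
        return false;
    }

    // Everything fits; give the leftover slots back as free spaces, one bucket
    // per set bit of the leftover count.
    size_t leftover = big_to_small - small_blocks;
    blocks[small_index] = 0;
    int i = small_index;
    for (; i < big_index; i++)
    {
        if (leftover & 1)
            spaces[i] += 1;
        leftover >>= 1;
    }
    spaces[i] += leftover;
    return true;
}
// Example: fitting 3 blocks of 2^5 into 1 space of 2^7 leaves one 2^5 slot
// over, so spaces[5] gains 1 and the 2^7 space is consumed.
#endif // illustrative sketch only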

// space_index gets updated to the biggest available space index.
BOOL gc_heap::can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index)
{
    assert (*space_index >= block_index);

    while (!can_fit_in_spaces_p (ordered_blocks, block_index, ordered_spaces, *space_index))
    {
        (*space_index)--;
        if (*space_index < block_index)
        {
            return FALSE;
        }
    }

    return TRUE;
}

BOOL gc_heap::can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count)
{
#ifdef FEATURE_STRUCTALIGN
    // BARTOKTODO (4841): reenable when can_fit_in_spaces_p takes alignment requirements into account
    return FALSE;
#endif // FEATURE_STRUCTALIGN
    int space_index = count - 1;
    for (int block_index = (count - 1); block_index >= 0; block_index--)
    {
        if (!can_fit_blocks_p (ordered_blocks, block_index, ordered_spaces, &space_index))
        {
            return FALSE;
        }
    }

    return TRUE;
}

void gc_heap::build_ordered_free_spaces (heap_segment* seg)
{
    assert (bestfit_seg);

    //bestfit_seg->add_buckets (MAX_NUM_BUCKETS - free_space_buckets + MIN_INDEX_POWER2,
    //                    ordered_free_space_indices + (MAX_NUM_BUCKETS - free_space_buckets),
    //                    free_space_buckets,
    //                    free_space_items);

    bestfit_seg->add_buckets (MIN_INDEX_POWER2,
                        ordered_free_space_indices,
                        MAX_NUM_BUCKETS,
                        free_space_items);

    assert (settings.condemned_generation == max_generation);

    uint8_t* first_address = heap_segment_mem (seg);
    uint8_t* end_address   = heap_segment_reserved (seg);
    //look through the pinned plugs for relevant ones.
    //Look for the right pinned plug to start from.
    reset_pinned_queue_bos();
    mark* m = 0;
    // See the comment in can_expand_into_p for why we need (max_generation + 1).
    size_t eph_gen_starts = (Align (min_obj_size)) * (max_generation + 1);
    BOOL has_fit_gen_starts = FALSE;

    while (!pinned_plug_que_empty_p())
    {
        m = oldest_pin();
        if ((pinned_plug (m) >= first_address) &&
            (pinned_plug (m) < end_address) &&
            (pinned_len (m) >= eph_gen_starts))
        {

            assert ((pinned_plug (m) - pinned_len (m)) == bestfit_first_pin);
            break;
        }
        else
        {
            deque_pinned_plug();
        }
    }

    if (!pinned_plug_que_empty_p())
    {
        bestfit_seg->add ((void*)m, TRUE, TRUE);
        deque_pinned_plug();
        m = oldest_pin();
        has_fit_gen_starts = TRUE;
    }

    while (!pinned_plug_que_empty_p() &&
            ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
    {
        bestfit_seg->add ((void*)m, TRUE, FALSE);
        deque_pinned_plug();
        m = oldest_pin();
    }

    if (commit_end_of_seg)
    {
        if (!has_fit_gen_starts)
        {
            assert (bestfit_first_pin == heap_segment_plan_allocated (seg));
        }
        bestfit_seg->add ((void*)seg, FALSE, (!has_fit_gen_starts));
    }

#ifdef _DEBUG
    bestfit_seg->check();
#endif //_DEBUG
}

BOOL gc_heap::try_best_fit (BOOL end_of_segment_p)
{
    if (!end_of_segment_p)
    {
        trim_free_spaces_indices ();
    }

    BOOL can_bestfit = can_fit_all_blocks_p (ordered_plug_indices,
                                             ordered_free_space_indices,
                                             MAX_NUM_BUCKETS);

    return can_bestfit;
}

BOOL gc_heap::best_fit (size_t free_space,
                        size_t largest_free_space,
                        size_t additional_space,
                        BOOL* use_additional_space)
{
    dprintf (SEG_REUSE_LOG_0, ("gen%d: trying best fit mechanism", settings.condemned_generation));

    assert (!additional_space || (additional_space && use_additional_space));
    if (use_additional_space)
    {
        *use_additional_space = FALSE;
    }

    if (ordered_plug_indices_init == FALSE)
    {
        total_ephemeral_plugs = 0;
        build_ordered_plug_indices();
        ordered_plug_indices_init = TRUE;
    }
    else
    {
        memcpy (ordered_plug_indices, saved_ordered_plug_indices, sizeof(ordered_plug_indices));
    }

    if (total_ephemeral_plugs == (END_SPACE_AFTER_GC + Align (min_obj_size)))
    {
        dprintf (SEG_REUSE_LOG_0, ("No ephemeral plugs to realloc, done"));
        size_t empty_eph = (END_SPACE_AFTER_GC + Align (min_obj_size) + (Align (min_obj_size)) * (max_generation + 1));
        BOOL can_fit_empty_eph = (largest_free_space >= empty_eph);
        if (!can_fit_empty_eph)
        {
            can_fit_empty_eph = (additional_space >= empty_eph);

            if (can_fit_empty_eph)
            {
                *use_additional_space = TRUE;
            }
        }

        return can_fit_empty_eph;
    }

    if ((total_ephemeral_plugs + approximate_new_allocation()) >= (free_space + additional_space))
    {
        dprintf (SEG_REUSE_LOG_0, ("We won't have enough free space left in this segment after fitting, done"));
        return FALSE;
    }

    if ((free_space + additional_space) == 0)
    {
        dprintf (SEG_REUSE_LOG_0, ("No free space in this segment, done"));
        return FALSE;
    }

#ifdef SEG_REUSE_STATS
    dprintf (SEG_REUSE_LOG_0, ("Free spaces:"));
    size_t total_free_space_power2 = 0;
    size_t total_free_space_items =
        dump_buckets (ordered_free_space_indices,
                      MAX_NUM_BUCKETS,
                      &total_free_space_power2);
    dprintf (SEG_REUSE_LOG_0, ("currently max free spaces is %Id", max_free_space_items));

    dprintf (SEG_REUSE_LOG_0, ("Ephemeral plugs: 0x%Ix, free space: 0x%Ix (rounded down to 0x%Ix (%Id%%)), additional free_space: 0x%Ix",
                total_ephemeral_plugs,
                free_space,
                total_free_space_power2,
                (free_space ? (total_free_space_power2 * 100 / free_space) : 0),
                additional_space));

    size_t saved_all_free_space_indices[MAX_NUM_BUCKETS];
    memcpy (saved_all_free_space_indices,
            ordered_free_space_indices,
            sizeof(saved_all_free_space_indices));

#endif // SEG_REUSE_STATS

    if (total_ephemeral_plugs > (free_space + additional_space))
    {
        return FALSE;
    }

    use_bestfit = try_best_fit(FALSE);

    if (!use_bestfit && additional_space)
    {
        int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (additional_space));

        if (relative_free_space_index != -1)
        {
            int relative_plug_index = 0;
            size_t plugs_to_fit = 0;

            for (relative_plug_index = (MAX_NUM_BUCKETS - 1); relative_plug_index >= 0; relative_plug_index--)
            {
                plugs_to_fit = ordered_plug_indices[relative_plug_index];
                if (plugs_to_fit != 0)
                {
                    break;
                }
            }

            if ((relative_plug_index > relative_free_space_index) ||
                ((relative_plug_index == relative_free_space_index) &&
                (plugs_to_fit > 1)))
            {
#ifdef SEG_REUSE_STATS
                dprintf (SEG_REUSE_LOG_0, ("additional space is 2^%d but we stopped at %d 2^%d plug(s)",
                            (relative_free_space_index + MIN_INDEX_POWER2),
                            plugs_to_fit,
                            (relative_plug_index + MIN_INDEX_POWER2)));
#endif // SEG_REUSE_STATS
                goto adjust;
            }

            dprintf (SEG_REUSE_LOG_0, ("Adding end of segment (2^%d)", (relative_free_space_index + MIN_INDEX_POWER2)));
            ordered_free_space_indices[relative_free_space_index]++;
            use_bestfit = try_best_fit(TRUE);
            if (use_bestfit)
            {
                free_space_items++;
                // Since we might've trimmed away some of the free spaces we had, we should
                // see if we really need to use end of seg space - if it's the same or
                // smaller than the largest space we trimmed, we can just add that one back
                // instead of using end of seg.
                if (relative_free_space_index > trimmed_free_space_index)
                {
                    *use_additional_space = TRUE;
                }
                else
                {
                    // If the additional space is <= the last trimmed space, we
                    // should just use that last trimmed space instead.
                    saved_ordered_free_space_indices[trimmed_free_space_index]++;
                }
            }
        }
    }

adjust:

    if (!use_bestfit)
    {
        dprintf (SEG_REUSE_LOG_0, ("couldn't fit..."));

#ifdef SEG_REUSE_STATS
        size_t saved_max = max_free_space_items;
        BOOL temp_bestfit = FALSE;

        dprintf (SEG_REUSE_LOG_0, ("----Starting experiment process----"));
        dprintf (SEG_REUSE_LOG_0, ("----Couldn't fit with max free items %Id", max_free_space_items));

        // TODO: need to take the end of segment into consideration.
        while (max_free_space_items <= total_free_space_items)
        {
            max_free_space_items += max_free_space_items / 2;
            dprintf (SEG_REUSE_LOG_0, ("----Temporarily increasing max free spaces to %Id", max_free_space_items));
            memcpy (ordered_free_space_indices,
                    saved_all_free_space_indices,
                    sizeof(ordered_free_space_indices));
            if (try_best_fit(FALSE))
            {
                temp_bestfit = TRUE;
                break;
            }
        }

        if (temp_bestfit)
        {
            dprintf (SEG_REUSE_LOG_0, ("----With %Id max free spaces we could fit", max_free_space_items));
        }
        else
        {
            dprintf (SEG_REUSE_LOG_0, ("----Tried all free spaces and still couldn't fit, lost too much space"));
        }

        dprintf (SEG_REUSE_LOG_0, ("----Restoring max free spaces to %Id", saved_max));
        max_free_space_items = saved_max;
#endif // SEG_REUSE_STATS
        if (free_space_items)
        {
            max_free_space_items = min (MAX_NUM_FREE_SPACES, free_space_items * 2);
            max_free_space_items = max (max_free_space_items, MIN_NUM_FREE_SPACES);
        }
        else
        {
            max_free_space_items = MAX_NUM_FREE_SPACES;
        }
    }

    dprintf (SEG_REUSE_LOG_0, ("Adjusted number of max free spaces to %Id", max_free_space_items));
    dprintf (SEG_REUSE_LOG_0, ("------End of best fitting process------\n"));

    return use_bestfit;
}
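
#if 0
// Illustrative sketch only (not compiled, hypothetical sizes): how a size maps onto the
// power-of-2 buckets used in the best-fit pass above. Plugs and free spaces are indexed by
// rounding down to a power of two, so a 40k end-of-seg space lands in the 2^15 bucket while
// a 70k plug lands in the 2^16 bucket; a lower-bucket free space can never be assumed to hold
// a higher-bucket plug, which is why that case jumps to the adjust label instead of retrying
// try_best_fit with the end of segment added.
int example_free_space_bucket (size_t size)
{
    return relative_index_power2_free_space (round_down_power2 (size));
}
#endif //0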

BOOL gc_heap::process_free_space (heap_segment* seg,
                         size_t free_space,
                         size_t min_free_size,
                         size_t min_cont_size,
                         size_t* total_free_space,
                         size_t* largest_free_space)
{
    *total_free_space += free_space;
    *largest_free_space = max (*largest_free_space, free_space);

#ifdef SIMPLE_DPRINTF
    dprintf (SEG_REUSE_LOG_1, ("free space len: %Ix, total free space: %Ix, largest free space: %Ix",
                free_space, *total_free_space, *largest_free_space));
#endif //SIMPLE_DPRINTF

    if ((*total_free_space >= min_free_size) && (*largest_free_space >= min_cont_size))
    {
#ifdef SIMPLE_DPRINTF
        dprintf (SEG_REUSE_LOG_0, ("(gen%d)total free: %Ix(min: %Ix), largest free: %Ix(min: %Ix). Found segment %Ix to reuse without bestfit",
            settings.condemned_generation,
            *total_free_space, min_free_size, *largest_free_space, min_cont_size,
            (size_t)seg));
#else
        UNREFERENCED_PARAMETER(seg);
#endif //SIMPLE_DPRINTF
        return TRUE;
    }

    int free_space_index = relative_index_power2_free_space (round_down_power2 (free_space));
    if (free_space_index != -1)
    {
        ordered_free_space_indices[free_space_index]++;
    }
    return FALSE;
}
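
// Example of the two thresholds above (hypothetical numbers): with min_free_size = 1MB and
// min_cont_size = 200k, a segment whose free spaces so far total 1.2MB but whose largest single
// space is only 150k does not qualify - both the running total and the largest contiguous space
// have to clear their minimums before the caller can reuse the segment without best fitting;
// otherwise the space is simply added to its power-of-2 bucket for a later best-fit attempt.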

BOOL gc_heap::expand_reused_seg_p()
{
    BOOL reused_seg = FALSE;
    int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
    if ((heap_expand_mechanism == expand_reuse_bestfit) ||
        (heap_expand_mechanism == expand_reuse_normal))
    {
        reused_seg = TRUE;
    }

    return reused_seg;
}

BOOL gc_heap::can_expand_into_p (heap_segment* seg, size_t min_free_size, size_t min_cont_size,
                                 allocator* gen_allocator)
{
    min_cont_size += END_SPACE_AFTER_GC;
    use_bestfit = FALSE;
    commit_end_of_seg = FALSE;
    bestfit_first_pin = 0;
    uint8_t* first_address = heap_segment_mem (seg);
    uint8_t* end_address   = heap_segment_reserved (seg);
    size_t end_extra_space = end_space_after_gc();

    if ((heap_segment_reserved (seg) - end_extra_space) <= heap_segment_plan_allocated (seg))
    {
        dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: can't use segment [%Ix %Ix, has less than %d bytes at the end",
                                   first_address, end_address, end_extra_space));
        return FALSE;
    }

    end_address -= end_extra_space;

    dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p(gen%d): min free: %Ix, min continuous: %Ix",
        settings.condemned_generation, min_free_size, min_cont_size));
    size_t eph_gen_starts = eph_gen_starts_size;

    if (settings.condemned_generation == max_generation)
    {
        size_t free_space = 0;
        size_t largest_free_space = free_space;
        dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen2: testing segment [%Ix %Ix", first_address, end_address));
        //Look through the pinned plugs for relevant ones and find the right pinned plug to start from.
        //We are going to allocate the generation starts in the 1st free space,
        //so start from the first free space that's big enough for gen starts and a min object size.
        // If we see a free space that is >= gen starts but < gen starts + min obj size we just don't use it -
        // we could use it by allocating the last generation start a bit bigger but
        // the complexity isn't worth the effort (those plugs are from gen2
        // already anyway).
        reset_pinned_queue_bos();
        mark* m = 0;
        BOOL has_fit_gen_starts = FALSE;

        init_ordered_free_space_indices ();
        while (!pinned_plug_que_empty_p())
        {
            m = oldest_pin();
            if ((pinned_plug (m) >= first_address) &&
                (pinned_plug (m) < end_address) &&
                (pinned_len (m) >= (eph_gen_starts + Align (min_obj_size))))
            {
                break;
            }
            else
            {
                deque_pinned_plug();
            }
        }

        if (!pinned_plug_que_empty_p())
        {
            bestfit_first_pin = pinned_plug (m) - pinned_len (m);

            if (process_free_space (seg,
                                    pinned_len (m) - eph_gen_starts,
                                    min_free_size, min_cont_size,
                                    &free_space, &largest_free_space))
            {
                return TRUE;
            }

            deque_pinned_plug();
            m = oldest_pin();
            has_fit_gen_starts = TRUE;
        }

        dprintf (3, ("first pin is %Ix", pinned_plug (m)));

        //tally up free space
        while (!pinned_plug_que_empty_p() &&
               ((pinned_plug (m) >= first_address) && (pinned_plug (m) < end_address)))
        {
            dprintf (3, ("looking at pin %Ix", pinned_plug (m)));
            if (process_free_space (seg,
                                    pinned_len (m),
                                    min_free_size, min_cont_size,
                                    &free_space, &largest_free_space))
            {
                return TRUE;
            }

            deque_pinned_plug();
            m = oldest_pin();
        }

        //try to find space at the end of the segment.
        size_t end_space = (end_address - heap_segment_plan_allocated (seg));
        size_t additional_space = ((min_free_size > free_space) ? (min_free_size - free_space) : 0);
        dprintf (SEG_REUSE_LOG_0, ("end space: %Ix; additional: %Ix", end_space, additional_space));
        if (end_space >= additional_space)
        {
            BOOL can_fit = TRUE;
            commit_end_of_seg = TRUE;

            if (largest_free_space < min_cont_size)
            {
                if (end_space >= min_cont_size)
                {
                    additional_space = max (min_cont_size, additional_space);
                    dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg for eph",
                        seg));
                }
                else
                {
                    if (settings.concurrent)
                    {
                        can_fit = FALSE;
                        commit_end_of_seg = FALSE;
                    }
                    else
                    {
                        size_t additional_space_bestfit = additional_space;
                        if (!has_fit_gen_starts)
                        {
                            if (additional_space_bestfit < (eph_gen_starts + Align (min_obj_size)))
                            {
                                dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, gen starts not allocated yet and end space is too small: %Id",
                                        additional_space_bestfit));
                                return FALSE;
                            }

                            bestfit_first_pin = heap_segment_plan_allocated (seg);
                            additional_space_bestfit -= eph_gen_starts;
                        }

                        can_fit = best_fit (free_space,
                                            largest_free_space,
                                            additional_space_bestfit,
                                            &commit_end_of_seg);

                        if (can_fit)
                        {
                            dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse with bestfit, %s committing end of seg",
                                seg, (commit_end_of_seg ? "with" : "without")));
                        }
                        else
                        {
                            dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
                        }
                    }
                }
            }
            else
            {
                dprintf (SEG_REUSE_LOG_0, ("(gen2)Found segment %Ix to reuse without bestfit, with committing end of seg", seg));
            }

            assert (additional_space <= end_space);
            if (commit_end_of_seg)
            {
                if (!grow_heap_segment (seg, heap_segment_plan_allocated (seg) + additional_space))
                {
                    dprintf (2, ("Couldn't commit end of segment?!"));
                    use_bestfit = FALSE;

                    return FALSE;
                }

                if (use_bestfit)
                {
                    // We increase the index here because growing the heap segment could create a discrepancy with
                    // the additional space we used (it could be bigger).
                    size_t free_space_end_of_seg =
                        heap_segment_committed (seg) - heap_segment_plan_allocated (seg);
                    int relative_free_space_index = relative_index_power2_free_space (round_down_power2 (free_space_end_of_seg));
                    saved_ordered_free_space_indices[relative_free_space_index]++;
                }
            }

            if (use_bestfit)
            {
                memcpy (ordered_free_space_indices,
                        saved_ordered_free_space_indices,
                        sizeof(ordered_free_space_indices));
                max_free_space_items = max (MIN_NUM_FREE_SPACES, free_space_items * 3 / 2);
                max_free_space_items = min (MAX_NUM_FREE_SPACES, max_free_space_items);
                dprintf (SEG_REUSE_LOG_0, ("could fit! %Id free spaces, %Id max", free_space_items, max_free_space_items));
            }

            return can_fit;
        }

        dprintf (SEG_REUSE_LOG_0, ("(gen2)Couldn't fit, total free space is %Ix", (free_space + end_space)));
        return FALSE;
    }
    else
    {
        assert (settings.condemned_generation == (max_generation-1));
        size_t free_space = (end_address - heap_segment_plan_allocated (seg));
        size_t largest_free_space = free_space;
        dprintf (SEG_REUSE_LOG_0, ("can_expand_into_p: gen1: testing segment [%Ix %Ix", first_address, end_address));
        //find the first free list in range of the current segment
        size_t sz_list = gen_allocator->first_bucket_size();
        unsigned int a_l_idx = 0;
        uint8_t* free_list = 0;
        for (; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
        {
            if ((eph_gen_starts <= sz_list) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
            {
                free_list = gen_allocator->alloc_list_head_of (a_l_idx);
                while (free_list)
                {
                    if ((free_list >= first_address) &&
                        (free_list < end_address) &&
                        (unused_array_size (free_list) >= eph_gen_starts))
                    {
                        goto next;
                    }
                    else
                    {
                        free_list = free_list_slot (free_list);
                    }
                }
            }
            // move on to the next (larger) bucket size
            sz_list = sz_list * 2;
        }
next:
        if (free_list)
        {
            init_ordered_free_space_indices ();
            if (process_free_space (seg,
                                    unused_array_size (free_list) - eph_gen_starts + Align (min_obj_size),
                                    min_free_size, min_cont_size,
                                    &free_space, &largest_free_space))
            {
                return TRUE;
            }

            free_list = free_list_slot (free_list);
        }
        else
        {
            dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, no free list"));
            return FALSE;
        }

       //tally up free space

        while (1)
        {
            while (free_list)
            {
                if ((free_list >= first_address) && (free_list < end_address) &&
                    process_free_space (seg,
                                        unused_array_size (free_list),
                                        min_free_size, min_cont_size,
                                        &free_space, &largest_free_space))
                {
                    return TRUE;
                }

                free_list = free_list_slot (free_list);
            }
            a_l_idx++;
            if (a_l_idx < gen_allocator->number_of_buckets())
            {
                free_list = gen_allocator->alloc_list_head_of (a_l_idx);
            }
            else
                break;
        }

        dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
        return FALSE;

        /*
        BOOL can_fit = best_fit (free_space, 0, NULL);
        if (can_fit)
        {
            dprintf (SEG_REUSE_LOG_0, ("(gen1)Found segment %Ix to reuse with bestfit", seg));
        }
        else
        {
            dprintf (SEG_REUSE_LOG_0, ("(gen1)Couldn't fit, total free space is %Ix", free_space));
        }

        return can_fit;
        */
    }
}
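
// Summary of can_expand_into_p: for a gen2 condemned GC the candidate segment's free space is
// discovered by walking the pinned plug queue (the gap in front of each pin), while for a gen1
// GC it comes from the free list entries of the supplied allocator that fall inside the segment.
// In both cases the first space considered must be big enough to hold the ephemeral generation
// starts, and the gen2 path may additionally commit end-of-segment space or fall back to the
// best-fit machinery above.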

void gc_heap::realloc_plug (size_t last_plug_size, uint8_t*& last_plug,
                            generation* gen, uint8_t* start_address,
                            unsigned int& active_new_gen_number,
                            uint8_t*& last_pinned_gap, BOOL& leftp,
                            BOOL shortened_p
#ifdef SHORT_PLUGS
                            , mark* pinned_plug_entry
#endif //SHORT_PLUGS
                            )
{
    // detect generation boundaries
    // make sure that active_new_gen_number is not the youngest generation,
    // because generation_limit wouldn't return the right thing in that case.
    if (!use_bestfit)
    {
        if ((active_new_gen_number > 1) &&
            (last_plug >= generation_limit (active_new_gen_number)))
        {
            assert (last_plug >= start_address);
            active_new_gen_number--;
            realloc_plan_generation_start (generation_of (active_new_gen_number), gen);
            assert (generation_plan_allocation_start (generation_of (active_new_gen_number)));
            leftp = FALSE;
        }
    }

    // detect pinned plugs
    if (!pinned_plug_que_empty_p() && (last_plug == pinned_plug (oldest_pin())))
    {
        size_t  entry = deque_pinned_plug();
        mark*  m = pinned_plug_of (entry);

        size_t saved_pinned_len = pinned_len(m);
        pinned_len(m) = last_plug - last_pinned_gap;
        //dprintf (3,("Adjusting pinned gap: [%Ix, %Ix[", (size_t)last_pinned_gap, (size_t)last_plug));

        if (m->has_post_plug_info())
        {
            last_plug_size += sizeof (gap_reloc_pair);
            dprintf (3, ("ra pinned %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size))
        }

        last_pinned_gap = last_plug + last_plug_size;
        dprintf (3, ("ra found pin %Ix, len: %Ix->%Ix, last_p: %Ix, last_p_size: %Ix",
            pinned_plug (m), saved_pinned_len, pinned_len (m), last_plug, last_plug_size));
        leftp = FALSE;

        //we are creating a generation fault. set the cards.
        {
            size_t end_card = card_of (align_on_card (last_plug + last_plug_size));
            size_t card = card_of (last_plug);
            while (card != end_card)
            {
                set_card (card);
                card++;
            }
        }
    }
    else if (last_plug >= start_address)
    {
#ifdef FEATURE_STRUCTALIGN
        int requiredAlignment;
        ptrdiff_t pad;
        node_aligninfo (last_plug, requiredAlignment, pad);

        // from how we previously aligned the plug's destination address,
        // compute the actual alignment offset.
        uint8_t* reloc_plug = last_plug + node_relocation_distance (last_plug);
        ptrdiff_t alignmentOffset = ComputeStructAlignPad(reloc_plug, requiredAlignment, 0);
        if (!alignmentOffset)
        {
            // allocate_in_expanded_heap doesn't expect alignmentOffset to be zero.
            alignmentOffset = requiredAlignment;
        }

        //clear the alignment info because we are reallocating
        clear_node_aligninfo (last_plug);
#else // FEATURE_STRUCTALIGN
        //clear the realignment flag because we are reallocating
        clear_node_realigned (last_plug);
#endif // FEATURE_STRUCTALIGN
        BOOL adjacentp = FALSE;
        BOOL set_padding_on_saved_p = FALSE;

        if (shortened_p)
        {
            last_plug_size += sizeof (gap_reloc_pair);

#ifdef SHORT_PLUGS
            assert (pinned_plug_entry != NULL);
            if (last_plug_size <= sizeof (plug_and_gap))
            {
                set_padding_on_saved_p = TRUE;
            }
#endif //SHORT_PLUGS

            dprintf (3, ("ra plug %Ix was shortened, adjusting plug size to %Ix", last_plug, last_plug_size))
        }

#ifdef SHORT_PLUGS
        clear_padding_in_expand (last_plug, set_padding_on_saved_p, pinned_plug_entry);
#endif //SHORT_PLUGS

        uint8_t* new_address = allocate_in_expanded_heap(gen, last_plug_size, adjacentp, last_plug,
#ifdef SHORT_PLUGS
                                     set_padding_on_saved_p,
                                     pinned_plug_entry,
#endif //SHORT_PLUGS
                                     TRUE, active_new_gen_number REQD_ALIGN_AND_OFFSET_ARG);

        dprintf (3, ("ra NA: [%Ix, %Ix[: %Ix", new_address, (new_address + last_plug_size), last_plug_size));
        assert (new_address);
        set_node_relocation_distance (last_plug, new_address - last_plug);
#ifdef FEATURE_STRUCTALIGN
        if (leftp && node_alignpad (last_plug) == 0)
#else // FEATURE_STRUCTALIGN
        if (leftp && !node_realigned (last_plug))
#endif // FEATURE_STRUCTALIGN
        {
            // TODO - temporarily disable L optimization because of a bug in it.
            //set_node_left (last_plug);
        }
        dprintf (3,(" Re-allocating %Ix->%Ix len %Id", (size_t)last_plug, (size_t)new_address, last_plug_size));
        leftp = adjacentp;
    }
}

void gc_heap::realloc_in_brick (uint8_t* tree, uint8_t*& last_plug,
                                uint8_t* start_address,
                                generation* gen,
                                unsigned int& active_new_gen_number,
                                uint8_t*& last_pinned_gap, BOOL& leftp)
{
    assert (tree != NULL);
    int   left_node = node_left_child (tree);
    int   right_node = node_right_child (tree);

    dprintf (3, ("ra: tree: %Ix, last_pin_gap: %Ix, last_p: %Ix, L: %d, R: %d",
        tree, last_pinned_gap, last_plug, left_node, right_node));

    if (left_node)
    {
        dprintf (3, ("LN: realloc %Ix(%Ix)", (tree + left_node), last_plug));
        realloc_in_brick ((tree + left_node), last_plug, start_address,
                          gen, active_new_gen_number, last_pinned_gap,
                          leftp);
    }

    if (last_plug != 0)
    {
        uint8_t*  plug = tree;

        BOOL has_pre_plug_info_p = FALSE;
        BOOL has_post_plug_info_p = FALSE;
        mark* pinned_plug_entry = get_next_pinned_entry (tree,
                                                         &has_pre_plug_info_p,
                                                         &has_post_plug_info_p,
                                                         FALSE);

        // We only care about the pre plug info 'cause that's what decides if the last plug is shortened.
        // The pinned plugs are handled in realloc_plug.
        size_t gap_size = node_gap_size (plug);
        uint8_t*   gap = (plug - gap_size);
        uint8_t*  last_plug_end = gap;
        size_t  last_plug_size = (last_plug_end - last_plug);
        // Cannot assert this - a plug could be less than that due to the shortened ones.
        //assert (last_plug_size >= Align (min_obj_size));
        dprintf (3, ("ra: plug %Ix, gap size: %Ix, last_pin_gap: %Ix, last_p: %Ix, last_p_end: %Ix, shortened: %d",
            plug, gap_size, last_pinned_gap, last_plug, last_plug_end, (has_pre_plug_info_p ? 1 : 0)));
        realloc_plug (last_plug_size, last_plug, gen, start_address,
                      active_new_gen_number, last_pinned_gap,
                      leftp, has_pre_plug_info_p
#ifdef SHORT_PLUGS
                      , pinned_plug_entry
#endif //SHORT_PLUGS
                      );
    }

    last_plug = tree;

    if (right_node)
    {
        dprintf (3, ("RN: realloc %Ix(%Ix)", (tree + right_node), last_plug));
        realloc_in_brick ((tree + right_node), last_plug, start_address,
                          gen, active_new_gen_number, last_pinned_gap,
                          leftp);
    }
}

void
gc_heap::realloc_plugs (generation* consing_gen, heap_segment* seg,
                        uint8_t* start_address, uint8_t* end_address,
                        unsigned active_new_gen_number)
{
    dprintf (3, ("--- Reallocing ---"));

    if (use_bestfit)
    {
        //make sure that every generation has a planned allocation start
        int  gen_number = max_generation - 1;
        while (gen_number >= 0)
        {
            generation* gen = generation_of (gen_number);
            if (0 == generation_plan_allocation_start (gen))
            {
                generation_plan_allocation_start (gen) =
                    bestfit_first_pin + (max_generation - gen_number - 1) * Align (min_obj_size);
                generation_plan_allocation_start_size (gen) = Align (min_obj_size);
                assert (generation_plan_allocation_start (gen));
            }
            gen_number--;
        }
    }

    uint8_t* first_address = start_address;
    //Look for the right pinned plug to start from.
    reset_pinned_queue_bos();
    uint8_t* planned_ephemeral_seg_end = heap_segment_plan_allocated (seg);
    while (!pinned_plug_que_empty_p())
    {
        mark* m = oldest_pin();
        if ((pinned_plug (m) >= planned_ephemeral_seg_end) && (pinned_plug (m) < end_address))
        {
            if (pinned_plug (m) < first_address)
            {
                first_address = pinned_plug (m);
            }
            break;
        }
        else
            deque_pinned_plug();
    }

    size_t  current_brick = brick_of (first_address);
    size_t  end_brick = brick_of (end_address-1);
    uint8_t*  last_plug = 0;

    uint8_t* last_pinned_gap = heap_segment_plan_allocated (seg);
    BOOL leftp = FALSE;

    dprintf (3, ("start addr: %Ix, first addr: %Ix, current oldest pin: %Ix",
        start_address, first_address, pinned_plug (oldest_pin())));

    while (current_brick <= end_brick)
    {
        int   brick_entry =  brick_table [ current_brick ];
        if (brick_entry >= 0)
        {
            realloc_in_brick ((brick_address (current_brick) + brick_entry - 1),
                              last_plug, start_address, consing_gen,
                              active_new_gen_number, last_pinned_gap,
                              leftp);
        }
        current_brick++;
    }

    if (last_plug != 0)
    {
        realloc_plug (end_address - last_plug, last_plug, consing_gen,
                      start_address,
                      active_new_gen_number, last_pinned_gap,
                      leftp, FALSE
#ifdef SHORT_PLUGS
                      , NULL
#endif //SHORT_PLUGS
                      );
    }

    //Fix the old segment allocated size
    assert (last_pinned_gap >= heap_segment_mem (seg));
    assert (last_pinned_gap <= heap_segment_committed (seg));
    heap_segment_plan_allocated (seg) = last_pinned_gap;
}
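
// The realloc pass above re-walks the brick table for [start_address, end_address) and, through
// realloc_in_brick/realloc_plug, gives every non-pinned plug a new destination via
// allocate_in_expanded_heap, while pinned plugs stay put (only the gap lengths in front of them
// are recomputed, and cards are set over them since this creates a generation fault). It runs
// only on the heap expansion path, so it can assume the planned generation starts either already
// exist or are created here for the bestfit case.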

void gc_heap::verify_no_pins (uint8_t* start, uint8_t* end)
{
#ifdef VERIFY_HEAP
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        BOOL contains_pinned_plugs = FALSE;
        size_t mi = 0;
        mark* m = 0;
        while (mi != mark_stack_tos)
        {
            m = pinned_plug_of (mi);
            if ((pinned_plug (m) >= start) && (pinned_plug (m) < end))
            {
                contains_pinned_plugs = TRUE;
                break;
            }
            else
                mi++;
        }

        if (contains_pinned_plugs)
        {
            FATAL_GC_ERROR();
        }
    }
#endif //VERIFY_HEAP
}

void gc_heap::set_expand_in_full_gc (int condemned_gen_number)
{
    if (!should_expand_in_full_gc)
    {
        if ((condemned_gen_number != max_generation) &&
            (settings.pause_mode != pause_low_latency) &&
            (settings.pause_mode != pause_sustained_low_latency))
        {
            should_expand_in_full_gc = TRUE;
        }
    }
}

void gc_heap::save_ephemeral_generation_starts()
{
    for (int ephemeral_generation = 0; ephemeral_generation < max_generation; ephemeral_generation++)
    {
        saved_ephemeral_plan_start[ephemeral_generation] =
            generation_plan_allocation_start (generation_of (ephemeral_generation));
        saved_ephemeral_plan_start_size[ephemeral_generation] =
            generation_plan_allocation_start_size (generation_of (ephemeral_generation));
    }
}

generation* gc_heap::expand_heap (int condemned_generation,
                                  generation* consing_gen,
                                  heap_segment* new_heap_segment)
{
    UNREFERENCED_PARAMETER(condemned_generation);
    assert (condemned_generation >= (max_generation -1));
    unsigned int active_new_gen_number = max_generation; //Set one too high to get generation gap
    uint8_t*  start_address = generation_limit (max_generation);
    uint8_t*  end_address = heap_segment_allocated (ephemeral_heap_segment);
    BOOL should_promote_ephemeral = FALSE;
    ptrdiff_t eph_size = total_ephemeral_size;
#ifdef BACKGROUND_GC
    dprintf(2,("%s: ---- Heap Expansion ----", (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")));
#endif //BACKGROUND_GC
    settings.heap_expansion = TRUE;

#ifdef BACKGROUND_GC
    if (cm_in_progress)
    {
        if (!expanded_in_fgc)
        {
            expanded_in_fgc = TRUE;
        }
    }
#endif //BACKGROUND_GC

    //reset the elevation state for next time.
    dprintf (2, ("Elevation: elevation = el_none"));
    if (settings.should_lock_elevation && !expand_reused_seg_p())
        settings.should_lock_elevation = FALSE;

    heap_segment* new_seg = new_heap_segment;

    if (!new_seg)
        return consing_gen;

    //copy the card and brick tables
    if (g_gc_card_table!= card_table)
        copy_brick_card_table();

    BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
    dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p));

    assert (generation_plan_allocation_start (generation_of (max_generation-1)));
    assert (generation_plan_allocation_start (generation_of (max_generation-1)) >=
            heap_segment_mem (ephemeral_heap_segment));
    assert (generation_plan_allocation_start (generation_of (max_generation-1)) <=
            heap_segment_committed (ephemeral_heap_segment));

    assert (generation_plan_allocation_start (youngest_generation));
    assert (generation_plan_allocation_start (youngest_generation) <
            heap_segment_plan_allocated (ephemeral_heap_segment));

    if (settings.pause_mode == pause_no_gc)
    {
        // We don't reuse for no gc, so the size used on the new eph seg is eph_size.
        if ((size_t)(heap_segment_reserved (new_seg) - heap_segment_mem (new_seg)) < (eph_size + soh_allocation_no_gc))
            should_promote_ephemeral = TRUE;
    }
    else
    {
        if (!use_bestfit)
        {
            should_promote_ephemeral = dt_low_ephemeral_space_p (tuning_deciding_promote_ephemeral);
        }
    }

    if (should_promote_ephemeral)
    {
        ephemeral_promotion = TRUE;
        get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep);
        dprintf (2, ("promoting ephemeral"));
        save_ephemeral_generation_starts();
    }
    else
    {
        // commit the new ephemeral segment all at once if it is a new one.
        if ((eph_size > 0) && new_segment_p)
        {
#ifdef FEATURE_STRUCTALIGN
            // The destination may require a larger alignment padding than the source.
            // Assume the worst possible alignment padding.
            eph_size += ComputeStructAlignPad(heap_segment_mem (new_seg), MAX_STRUCTALIGN, OBJECT_ALIGNMENT_OFFSET);
#endif // FEATURE_STRUCTALIGN
#ifdef RESPECT_LARGE_ALIGNMENT
            //Since the generation start can be larger than min_obj_size
            //The alignment could be switched.
            eph_size += switch_alignment_size(FALSE);
#endif //RESPECT_LARGE_ALIGNMENT
            //Since the generation start can be larger than min_obj_size
            //Compare the alignment of the first object in gen1
            if (grow_heap_segment (new_seg, heap_segment_mem (new_seg) + eph_size) == 0)
            {
                fgm_result.set_fgm (fgm_commit_eph_segment, eph_size, FALSE);
                return consing_gen;
            }
            heap_segment_used (new_seg) = heap_segment_committed (new_seg);
        }

        //Fix the end of the old ephemeral heap segment
        heap_segment_plan_allocated (ephemeral_heap_segment) =
            generation_plan_allocation_start (generation_of (max_generation-1));

        dprintf (3, ("Old ephemeral allocated set to %Ix",
                    (size_t)heap_segment_plan_allocated (ephemeral_heap_segment)));
    }

    if (new_segment_p)
    {
        // TODO - Is this really necessary? We should think about it.
        //initialize the first brick
        size_t first_brick = brick_of (heap_segment_mem (new_seg));
        set_brick (first_brick,
                heap_segment_mem (new_seg) - brick_address (first_brick));
    }

    //From this point on, we cannot run out of memory

    //reset the allocation of the consing generation back to the end of the
    //old ephemeral segment
    generation_allocation_limit (consing_gen) =
        heap_segment_plan_allocated (ephemeral_heap_segment);
    generation_allocation_pointer (consing_gen) = generation_allocation_limit (consing_gen);
    generation_allocation_segment (consing_gen) = ephemeral_heap_segment;

    //clear the generation gap for all of the ephemeral generations
    {
        int generation_num = max_generation-1;
        while (generation_num >= 0)
        {
            generation* gen = generation_of (generation_num);
            generation_plan_allocation_start (gen) = 0;
            generation_num--;
        }
    }

    heap_segment* old_seg = ephemeral_heap_segment;
    ephemeral_heap_segment = new_seg;

    //Note: the ephemeral segment shouldn't be threaded onto the segment chain
    //because the relocation and compact phases shouldn't see it

    // set the generation members used by allocate_in_expanded_heap
    // and switch to ephemeral generation
    consing_gen = ensure_ephemeral_heap_segment (consing_gen);

    if (!should_promote_ephemeral)
    {
        realloc_plugs (consing_gen, old_seg, start_address, end_address,
                    active_new_gen_number);
    }

    if (!use_bestfit)
    {
        repair_allocation_in_expanded_heap (consing_gen);
    }

    // assert that the generation gaps for all of the ephemeral generations were allocated.
#ifdef _DEBUG
    {
        int generation_num = max_generation-1;
        while (generation_num >= 0)
        {
            generation* gen = generation_of (generation_num);
            assert (generation_plan_allocation_start (gen));
            generation_num--;
        }
    }
#endif // _DEBUG

    if (!new_segment_p)
    {
        dprintf (2, ("Demoting ephemeral segment"));
        //demote the entire segment.
        settings.demotion = TRUE;
        get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
        demotion_low = heap_segment_mem (ephemeral_heap_segment);
        demotion_high = heap_segment_reserved (ephemeral_heap_segment);
    }
    else
    {
        demotion_low = MAX_PTR;
        demotion_high = 0;
#ifndef MULTIPLE_HEAPS
        settings.demotion = FALSE;
        get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
#endif //!MULTIPLE_HEAPS
    }
    ptrdiff_t eph_size1 = total_ephemeral_size;
    MAYBE_UNUSED_VAR(eph_size1);

    if (!should_promote_ephemeral && new_segment_p)
    {
        assert (eph_size1 <= eph_size);
    }

    if (heap_segment_mem (old_seg) == heap_segment_plan_allocated (old_seg))
    {
        // This is to catch when we accidentally delete a segment that has pins.
        verify_no_pins (heap_segment_mem (old_seg), heap_segment_reserved (old_seg));
    }

    verify_no_pins (heap_segment_plan_allocated (old_seg), heap_segment_reserved(old_seg));

    dprintf(2,("---- End of Heap Expansion ----"));
    return consing_gen;
}

void gc_heap::set_static_data()
{
    static_data* pause_mode_sdata = static_data_table[latency_level];
    for (int i = 0; i < NUMBERGENERATIONS; i++)
    {
        dynamic_data* dd = dynamic_data_of (i);
        static_data* sdata = &pause_mode_sdata[i];

        dd->sdata = sdata;
        dd->min_size = sdata->min_size;

        dprintf (GTC_LOG, ("PM: %d, gen%d:  min: %Id, max: %Id, fr_l: %Id, fr_b: %d%%",
            settings.pause_mode,i,
            dd->min_size, dd_max_size (dd),
            sdata->fragmentation_limit, (int)(sdata->fragmentation_burden_limit * 100)));
    }
}

// Initialize the values that are not const.
void gc_heap::init_static_data()
{
    size_t gen0_min_size = get_gen0_min_size();

    size_t gen0_max_size =
#ifdef MULTIPLE_HEAPS
        max (6*1024*1024, min ( Align(soh_segment_size/2), 200*1024*1024));
#else //MULTIPLE_HEAPS
        (gc_can_use_concurrent ?
            6*1024*1024 :
            max (6*1024*1024,  min ( Align(soh_segment_size/2), 200*1024*1024)));
#endif //MULTIPLE_HEAPS

    gen0_max_size = max (gen0_min_size, gen0_max_size);

    if (heap_hard_limit)
    {
        size_t gen0_max_size_seg = soh_segment_size / 4;
        dprintf (GTC_LOG, ("limit gen0 max %Id->%Id", gen0_max_size, gen0_max_size_seg));
        gen0_max_size = min (gen0_max_size, gen0_max_size_seg);
    }

    size_t gen0_max_size_config = (size_t)GCConfig::GetGCGen0MaxBudget();

    if (gen0_max_size_config)
    {
        gen0_max_size = min (gen0_max_size, gen0_max_size_config);
    }

    gen0_max_size = Align (gen0_max_size);
    gen0_min_size = min (gen0_min_size, gen0_max_size);

    // TODO: gen0_max_size has a 200mb cap; gen1_max_size should also have a cap.
    size_t gen1_max_size = (size_t)
#ifdef MULTIPLE_HEAPS
        max (6*1024*1024, Align(soh_segment_size/2));
#else //MULTIPLE_HEAPS
        (gc_can_use_concurrent ?
            6*1024*1024 :
            max (6*1024*1024, Align(soh_segment_size/2)));
#endif //MULTIPLE_HEAPS

    dprintf (GTC_LOG, ("gen0 min: %Id, max: %Id, gen1 max: %Id",
        gen0_min_size, gen0_max_size, gen1_max_size));

    for (int i = latency_level_first; i <= latency_level_last; i++)
    {
        static_data_table[i][0].min_size = gen0_min_size;
        static_data_table[i][0].max_size = gen0_max_size;
        static_data_table[i][1].max_size = gen1_max_size;
    }
}
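
// Example of the clamping chain above (hypothetical numbers): if the build-specific formula
// yields an initial gen0_max_size of 16MB, a heap_hard_limit with a 32MB soh_segment_size caps
// it at 32MB/4 = 8MB, a GCGen0MaxBudget of 4MB lowers it further to 4MB, and the final min()
// then keeps gen0_min_size from exceeding whatever gen0_max_size ends up being.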

bool gc_heap::init_dynamic_data()
{
    uint64_t now_raw_ts = RawGetHighPrecisionTimeStamp ();
#ifdef HEAP_BALANCE_INSTRUMENTATION
    start_raw_ts = now_raw_ts;
#endif //HEAP_BALANCE_INSTRUMENTATION
    uint32_t now = (uint32_t)(now_raw_ts / (qpf / 1000));

    set_static_data();

    if (heap_number == 0)
    {
        smoothed_desired_per_heap = dynamic_data_of (0)->min_size;
#ifdef HEAP_BALANCE_INSTRUMENTATION
        last_gc_end_time_ms = now;
        dprintf (HEAP_BALANCE_LOG, ("qpf=%I64d, start: %I64d(%d)", qpf, start_raw_ts, now));
#endif //HEAP_BALANCE_INSTRUMENTATION
    }

    for (int i = 0; i <= max_generation+1; i++)
    {
        dynamic_data* dd = dynamic_data_of (i);
        dd->gc_clock = 0;
        dd->time_clock = now;
        dd->current_size = 0;
        dd->promoted_size = 0;
        dd->collection_count = 0;
        dd->new_allocation = dd->min_size;
        dd->gc_new_allocation = dd->new_allocation;
        dd->desired_allocation = dd->new_allocation;
        dd->fragmentation = 0;
    }

#ifdef GC_CONFIG_DRIVEN
    if (heap_number == 0)
        time_init = now;
#endif //GC_CONFIG_DRIVEN

    return true;
}

float gc_heap::surv_to_growth (float cst, float limit, float max_limit)
{
    if (cst < ((max_limit - limit ) / (limit * (max_limit-1.0f))))
        return ((limit - limit*cst) / (1.0f - (cst * limit)));
    else
        return max_limit;
}
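
// Worked example for surv_to_growth (illustrative limits): with limit = 9 and max_limit = 20 the
// crossover survival ratio is (20-9)/(9*(20-1)) ~= 0.064. A survival ratio cst = 0.05 is below
// that, so f = 9*(1-0.05)/(1-0.05*9) = 8.55/0.55 ~= 15.5; any cst above ~0.064 simply returns
// max_limit. The caller multiplies f by the survivor size (ephemeral generations) or by the
// current size (gen2 and LOH) when sizing the next budget.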


//if the allocation budget wasn't exhausted, the new budget may be wrong because the survival rate may
//not be correct (the collection happened too soon). Correct with a linear estimation based on the
//previous value of the budget.
static size_t linear_allocation_model (float allocation_fraction, size_t new_allocation,
                                       size_t previous_desired_allocation, size_t collection_count)
{
    if ((allocation_fraction < 0.95) && (allocation_fraction > 0.0))
    {
        dprintf (2, ("allocation fraction: %d", (int)(allocation_fraction/100.0)));
        new_allocation = (size_t)(allocation_fraction*new_allocation + (1.0-allocation_fraction)*previous_desired_allocation);
    }
#if 0
    size_t smoothing = 3; // exponential smoothing factor
    if (smoothing  > collection_count)
        smoothing  = collection_count;
    new_allocation = new_allocation / smoothing + ((previous_desired_allocation / smoothing) * (smoothing-1));
#else
    UNREFERENCED_PARAMETER(collection_count);
#endif //0
    return new_allocation;
}
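
#if 0
// Illustrative only (not compiled), hypothetical numbers: if the GC happened after only 40% of
// the previous budget was used (allocation_fraction = 0.4), a freshly computed 100MB budget and
// a previous desired budget of 60MB blend to 0.4*100 + 0.6*60 = 76MB. Fractions outside the
// (0, 0.95) range leave new_allocation unchanged.
size_t example_budget = linear_allocation_model (0.4f, 100*1024*1024, 60*1024*1024, 5);
#endif //0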

size_t gc_heap::desired_new_allocation (dynamic_data* dd,
                                        size_t out, int gen_number,
                                        int pass)
{
    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();

    if (dd_begin_data_size (dd) == 0)
    {
        size_t new_allocation = dd_min_size (dd);
        current_gc_data_per_heap->gen_data[gen_number].new_allocation = new_allocation;
        return new_allocation;
    }
    else
    {
        float     cst;
        size_t    previous_desired_allocation = dd_desired_allocation (dd);
        size_t    current_size = dd_current_size (dd);
        float     max_limit = dd_max_limit (dd);
        float     limit = dd_limit (dd);
        size_t    min_gc_size = dd_min_size (dd);
        float     f = 0;
        size_t    max_size = dd_max_size (dd);
        size_t    new_allocation = 0;
        float allocation_fraction = (float) (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)) / (float) (dd_desired_allocation (dd));

        if (gen_number >= max_generation)
        {
            size_t    new_size = 0;

            cst = min (1.0f, float (out) / float (dd_begin_data_size (dd)));

            f = surv_to_growth (cst, limit, max_limit);
            size_t max_growth_size = (size_t)(max_size / f);
            if (current_size >= max_growth_size)
            {
                new_size = max_size;
            }
            else
            {
                new_size = (size_t) min (max ( (f * current_size), min_gc_size), max_size);
            }

            assert ((new_size >= current_size) || (new_size == max_size));

            if (gen_number == max_generation)
            {
                new_allocation  =  max((new_size - current_size), min_gc_size);

                new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
                                                          dd_desired_allocation (dd), dd_collection_count (dd));

                if (
#ifdef BGC_SERVO_TUNING
                    !bgc_tuning::fl_tuning_triggered &&
#endif //BGC_SERVO_TUNING
                    (dd_fragmentation (dd) > ((size_t)((f-1)*current_size))))
                {
                    //reducing allocation in case of fragmentation
                    size_t new_allocation1 = max (min_gc_size,
                                                  // CAN OVERFLOW
                                                  (size_t)((float)new_allocation * current_size /
                                                           ((float)current_size + 2*dd_fragmentation (dd))));
                    dprintf (2, ("Reducing max_gen allocation due to fragmentation from %Id to %Id",
                                 new_allocation, new_allocation1));
                    new_allocation = new_allocation1;
                }
            }
            else //large object heap
            {
                uint32_t memory_load = 0;
                uint64_t available_physical = 0;
                get_memory_info (&memory_load, &available_physical);
#ifdef TRACE_GC
                if (heap_hard_limit)
                {
                    size_t loh_allocated = 0;
                    size_t loh_committed = committed_size (true, &loh_allocated);
                    dprintf (1, ("GC#%Id h%d, GMI: LOH budget, LOH commit %Id (obj %Id, frag %Id), total commit: %Id (recorded: %Id)",
                        (size_t)settings.gc_index, heap_number,
                        loh_committed, loh_allocated,
                        dd_fragmentation (dynamic_data_of (max_generation + 1)),
                        get_total_committed_size(), (current_total_committed - current_total_committed_bookkeeping)));
                }
#endif //TRACE_GC
                if (heap_number == 0)
                    settings.exit_memory_load = memory_load;
                if (available_physical > 1024*1024)
                    available_physical -= 1024*1024;

                uint64_t available_free = available_physical + (uint64_t)generation_free_list_space (generation_of (gen_number));
                if (available_free > (uint64_t)MAX_PTR)
                {
                    available_free = (uint64_t)MAX_PTR;
                }

                //try to avoid OOM during large object allocation
                new_allocation = max (min(max((new_size - current_size), dd_desired_allocation (dynamic_data_of (max_generation))),
                                          (size_t)available_free),
                                      max ((current_size/4), min_gc_size));

                new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
                                                          dd_desired_allocation (dd), dd_collection_count (dd));

            }
        }
        else
        {
            size_t survivors = out;
            cst = float (survivors) / float (dd_begin_data_size (dd));
            f = surv_to_growth (cst, limit, max_limit);
            new_allocation = (size_t) min (max ((f * (survivors)), min_gc_size), max_size);

            new_allocation = linear_allocation_model (allocation_fraction, new_allocation,
                                                      dd_desired_allocation (dd), dd_collection_count (dd));

            if (gen_number == 0)
            {
                if (pass == 0)
                {

                    //printf ("%f, %Id\n", cst, new_allocation);
                    size_t free_space = generation_free_list_space (generation_of (gen_number));
                    // DTREVIEW - is min_gc_size really a good choice?
                    // on 64-bit this will almost always be true.
                    dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
                    if (free_space > min_gc_size)
                    {
                        settings.gen0_reduction_count = 2;
                    }
                    else
                    {
                        if (settings.gen0_reduction_count > 0)
                            settings.gen0_reduction_count--;
                    }
                }
                if (settings.gen0_reduction_count > 0)
                {
                    dprintf (2, ("Reducing new allocation based on fragmentation"));
                    new_allocation = min (new_allocation,
                                          max (min_gc_size, (max_size/3)));
                }
            }
        }

        size_t new_allocation_ret =
            Align (new_allocation, get_alignment_constant (!(gen_number == (max_generation+1))));
        int gen_data_index = gen_number;
        gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_data_index]);
        gen_data->new_allocation = new_allocation_ret;

        dd_surv (dd) = cst;

#ifdef SIMPLE_DPRINTF
        dprintf (1, ("h%d g%d surv: %Id current: %Id alloc: %Id (%d%%) f: %d%% new-size: %Id new-alloc: %Id",
                    heap_number, gen_number, out, current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd)),
                    (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
#else
        dprintf (1,("gen: %d in: %Id out: %Id ", gen_number, generation_allocation_size (generation_of (gen_number)), out));
        dprintf (1,("current: %Id alloc: %Id ", current_size, (dd_desired_allocation (dd) - dd_gc_new_allocation (dd))));
        dprintf (1,(" surv: %d%% f: %d%% new-size: %Id new-alloc: %Id",
                    (int)(cst*100), (int)(f*100), current_size + new_allocation, new_allocation));
#endif //SIMPLE_DPRINTF

        return new_allocation_ret;
    }
}
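
// Note on the gen2 fragmentation reduction above (hypothetical numbers): with current_size =
// 100MB, 50MB of fragmentation and a freshly computed 60MB budget, the frag > (f-1)*current_size
// check can trigger and the budget is scaled by current/(current + 2*frag), i.e.
// 60 * 100/(100 + 100) = 30MB, never dropping below min_gc_size. This keeps a heavily fragmented
// gen2 from also being handed a growing allocation budget.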

//returns the planned size of a generation (including free list element)
size_t gc_heap::generation_plan_size (int gen_number)
{
    if (0 == gen_number)
        return max((heap_segment_plan_allocated (ephemeral_heap_segment) -
                    generation_plan_allocation_start (generation_of (gen_number))),
                   (int)Align (min_obj_size));
    else
    {
        generation* gen = generation_of (gen_number);
        if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
            return (generation_plan_allocation_start (generation_of (gen_number - 1)) -
                    generation_plan_allocation_start (generation_of (gen_number)));
        else
        {
            size_t gensize = 0;
            heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

            PREFIX_ASSUME(seg != NULL);

            while (seg && (seg != ephemeral_heap_segment))
            {
                gensize += heap_segment_plan_allocated (seg) -
                           heap_segment_mem (seg);
                seg = heap_segment_next_rw (seg);
            }
            if (seg)
            {
                gensize += (generation_plan_allocation_start (generation_of (gen_number - 1)) -
                            heap_segment_mem (ephemeral_heap_segment));
            }
            return gensize;
        }
    }

}

//returns the size of a generation (including free list element)
size_t gc_heap::generation_size (int gen_number)
{
    if (0 == gen_number)
        return max((heap_segment_allocated (ephemeral_heap_segment) -
                    generation_allocation_start (generation_of (gen_number))),
                   (int)Align (min_obj_size));
    else
    {
        generation* gen = generation_of (gen_number);
        if (heap_segment_rw (generation_start_segment (gen)) == ephemeral_heap_segment)
            return (generation_allocation_start (generation_of (gen_number - 1)) -
                    generation_allocation_start (generation_of (gen_number)));
        else
        {
            size_t gensize = 0;
            heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

            PREFIX_ASSUME(seg != NULL);

            while (seg && (seg != ephemeral_heap_segment))
            {
                gensize += heap_segment_allocated (seg) -
                           heap_segment_mem (seg);
                seg = heap_segment_next_rw (seg);
            }
            if (seg)
            {
                gensize += (generation_allocation_start (generation_of (gen_number - 1)) -
                            heap_segment_mem (ephemeral_heap_segment));
            }

            return gensize;
        }
    }

}

size_t  gc_heap::compute_in (int gen_number)
{
    assert (gen_number != 0);
    dynamic_data* dd = dynamic_data_of (gen_number);

    size_t in = generation_allocation_size (generation_of (gen_number));

    if (gen_number == max_generation && ephemeral_promotion)
    {
        in = 0;
        for (int i = 0; i <= max_generation; i++)
        {
            dynamic_data* dd = dynamic_data_of (i);
            in += dd_survived_size (dd);
            if (i != max_generation)
            {
                generation_condemned_allocated (generation_of (gen_number)) += dd_survived_size (dd);
            }
        }
    }

    dd_gc_new_allocation (dd) -= in;
    dd_new_allocation (dd) = dd_gc_new_allocation (dd);

    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
    gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
    gen_data->in = in;

    generation_allocation_size (generation_of (gen_number)) = 0;
    return in;
}
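
// Example of what compute_in does to the budget (hypothetical numbers): if gen1 still had 10MB
// of budget left in dd_gc_new_allocation and roughly 3MB was allocated into it (promotion from
// gen0) since its last collection, the remaining budget drops to 7MB. An exhausted (non-positive)
// budget is one of the inputs that drives the next generation_to_condemn decision toward gen1.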

void  gc_heap::compute_promoted_allocation (int gen_number)
{
    compute_in (gen_number);
}

#ifdef BIT64
inline
size_t gc_heap::trim_youngest_desired (uint32_t memory_load,
                                       size_t total_new_allocation,
                                       size_t total_min_allocation)
{
    if (memory_load < MAX_ALLOWED_MEM_LOAD)
    {
        // If the total of memory load and gen0 budget exceeds
        // our max memory load limit, trim the gen0 budget so the total
        // is the max memory load limit.
        size_t remain_memory_load = (MAX_ALLOWED_MEM_LOAD - memory_load) * mem_one_percent;
        return min (total_new_allocation, remain_memory_load);
    }
    else
    {
        return max (mem_one_percent, total_min_allocation);
    }
}
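
// Example for trim_youngest_desired (hypothetical numbers, assuming a max allowed load of 90%):
// at a current memory load of 85% the remaining headroom is (90-85)*mem_one_percent, i.e. 5% of
// physical memory, so a combined gen0 budget bigger than that gets trimmed down to it; at or
// above the limit we fall back to max(mem_one_percent, total_min_allocation) instead.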

size_t gc_heap::joined_youngest_desired (size_t new_allocation)
{
    dprintf (2, ("Entry memory load: %d; gen0 new_alloc: %Id", settings.entry_memory_load, new_allocation));

    size_t final_new_allocation = new_allocation;
    if (new_allocation > MIN_YOUNGEST_GEN_DESIRED)
    {
        uint32_t num_heaps = 1;

#ifdef MULTIPLE_HEAPS
        num_heaps = gc_heap::n_heaps;
#endif //MULTIPLE_HEAPS

        size_t total_new_allocation = new_allocation * num_heaps;
        size_t total_min_allocation = MIN_YOUNGEST_GEN_DESIRED * num_heaps;

        if ((settings.entry_memory_load >= MAX_ALLOWED_MEM_LOAD) ||
            (total_new_allocation > max (youngest_gen_desired_th, total_min_allocation)))
        {
            uint32_t memory_load = 0;
            get_memory_info (&memory_load);
            settings.exit_memory_load = memory_load;
            dprintf (2, ("Current memory load: %d", memory_load));

            size_t final_total =
                trim_youngest_desired (memory_load, total_new_allocation, total_min_allocation);
            size_t max_new_allocation =
#ifdef MULTIPLE_HEAPS
                                         dd_max_size (g_heaps[0]->dynamic_data_of (0));
#else //MULTIPLE_HEAPS
                                         dd_max_size (dynamic_data_of (0));
#endif //MULTIPLE_HEAPS

            final_new_allocation  = min (Align ((final_total / num_heaps), get_alignment_constant (TRUE)), max_new_allocation);
        }
    }

    if (final_new_allocation < new_allocation)
    {
        settings.gen0_reduction_count = 2;
    }

    return final_new_allocation;
}
#endif // BIT64

inline
gc_history_per_heap* gc_heap::get_gc_data_per_heap()
{
#ifdef BACKGROUND_GC
    return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap);
#else
    return &gc_data_per_heap;
#endif //BACKGROUND_GC
}

void gc_heap::compute_new_dynamic_data (int gen_number)
{
    PREFIX_ASSUME(gen_number >= 0);
    PREFIX_ASSUME(gen_number <= max_generation);

    dynamic_data* dd = dynamic_data_of (gen_number);
    generation*   gen = generation_of (gen_number);
    size_t        in = (gen_number==0) ? 0 : compute_in (gen_number);

    size_t total_gen_size = generation_size (gen_number);
    //keep track of fragmentation
    dd_fragmentation (dd) = generation_free_list_space (gen) + generation_free_obj_space (gen);
    dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);

    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();

    size_t out = dd_survived_size (dd);

    gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
    gen_data->size_after = total_gen_size;
    gen_data->free_list_space_after = generation_free_list_space (gen);
    gen_data->free_obj_space_after = generation_free_obj_space (gen);

    if ((settings.pause_mode == pause_low_latency) && (gen_number <= 1))
    {
        // When we are in the low latency mode, we can still be
        // condemning more than gen1 because of induced GCs.
        dd_desired_allocation (dd) = low_latency_alloc;
    }
    else
    {
        if (gen_number == 0)
        {
            //compensate for dead finalizable object promotion.
            //they shouldn't be counted for growth.
            size_t final_promoted = 0;
            final_promoted = min (promoted_bytes (heap_number), out);
            // Prefast: this is clear from above but prefast needs to be told explicitly
            PREFIX_ASSUME(final_promoted <= out);

            dprintf (2, ("gen: %d final promoted: %Id", gen_number, final_promoted));
            dd_freach_previous_promotion (dd) = final_promoted;
            size_t lower_bound = desired_new_allocation  (dd, out-final_promoted, gen_number, 0);

            if (settings.condemned_generation == 0)
            {
                //there is no noise.
                dd_desired_allocation (dd) = lower_bound;
            }
            else
            {
                size_t higher_bound = desired_new_allocation (dd, out, gen_number, 1);

                // <TODO>This assert was causing AppDomains\unload\test1n\test1nrun.bat to fail</TODO>
                //assert ( lower_bound <= higher_bound);

                //discount the noise. Change the desired allocation
                //only if the previous value is outside of the range.
                if (dd_desired_allocation (dd) < lower_bound)
                {
                    dd_desired_allocation (dd) = lower_bound;
                }
                else if (dd_desired_allocation (dd) > higher_bound)
                {
                    dd_desired_allocation (dd) = higher_bound;
                }
#if defined (BIT64) && !defined (MULTIPLE_HEAPS)
                dd_desired_allocation (dd) = joined_youngest_desired (dd_desired_allocation (dd));
#endif // BIT64 && !MULTIPLE_HEAPS
                trim_youngest_desired_low_memory();
                dprintf (2, ("final gen0 new_alloc: %Id", dd_desired_allocation (dd)));
            }
        }
        else
        {
            dd_desired_allocation (dd) = desired_new_allocation (dd, out, gen_number, 0);
        }
    }

    gen_data->pinned_surv = dd_pinned_survived_size (dd);
    gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);

    dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
    dd_new_allocation (dd) = dd_gc_new_allocation (dd);

    //update counter
    dd_promoted_size (dd) = out;
    if (gen_number == max_generation)
    {
        dd = dynamic_data_of (max_generation+1);
        total_gen_size = generation_size (max_generation + 1);
        dd_fragmentation (dd) = generation_free_list_space (large_object_generation) +
                                generation_free_obj_space (large_object_generation);
        dd_current_size (dd) = total_gen_size - dd_fragmentation (dd);
        dd_survived_size (dd) = dd_current_size (dd);
        in = 0;
        out = dd_current_size (dd);
        dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
        dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
                                           get_alignment_constant (FALSE));
        dd_new_allocation (dd) = dd_gc_new_allocation (dd);

        gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
        gen_data->size_after = total_gen_size;
        gen_data->free_list_space_after = generation_free_list_space (large_object_generation);
        gen_data->free_obj_space_after = generation_free_obj_space (large_object_generation);
        gen_data->npinned_surv = out;
#ifdef BACKGROUND_GC
        end_loh_size = total_gen_size;
#endif //BACKGROUND_GC
        //update counter
        dd_promoted_size (dd) = out;
    }
}

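// Under OS low memory notification, clamp gen0's budget: it is not allowed to
// exceed roughly a tenth of the memory currently committed for the gen2 and
// LOH segments (but it never goes below dd_min_size).
//
// Illustrative example (made-up numbers): with 200MB committed across gen2 and
// LOH, the candidate budget is Align (20MB) and gen0's desired allocation
// becomes min (current, candidate).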
void gc_heap::trim_youngest_desired_low_memory()
{
    if (g_low_memory_status)
    {
        size_t committed_mem = 0;
        heap_segment* seg = generation_start_segment (generation_of (max_generation));
        while (seg)
        {
            committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
            seg = heap_segment_next (seg);
        }
        seg = generation_start_segment (generation_of (max_generation + 1));
        while (seg)
        {
            committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
            seg = heap_segment_next (seg);
        }

        dynamic_data* dd = dynamic_data_of (0);
        size_t current = dd_desired_allocation (dd);
        size_t candidate = max (Align ((committed_mem / 10), get_alignment_constant(FALSE)), dd_min_size (dd));

        dd_desired_allocation (dd) = min (current, candidate);
    }
}

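// Decommit the committed-but-unused tail of the ephemeral segment. slack_space
// starts out as committed - allocated and is then capped below so we don't
// decommit memory the next gen0 budget is likely to need again, before it is
// handed to decommit_heap_segment_pages.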
void gc_heap::decommit_ephemeral_segment_pages()
{
    if (settings.concurrent)
    {
        return;
    }

    size_t slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);

    dynamic_data* dd = dynamic_data_of (0);

#ifndef MULTIPLE_HEAPS
    size_t extra_space = (g_low_memory_status ? 0 : (512 * 1024));
    size_t decommit_timeout = (g_low_memory_status ? 0 : GC_EPHEMERAL_DECOMMIT_TIMEOUT);
    size_t ephemeral_elapsed = dd_time_clock(dd) - gc_last_ephemeral_decommit_time;

    if (dd_desired_allocation (dd) > gc_gen0_desired_high)
    {
        gc_gen0_desired_high = dd_desired_allocation (dd) + extra_space;
    }

    if (ephemeral_elapsed >= decommit_timeout)
    {
        slack_space = min (slack_space, gc_gen0_desired_high);

        gc_last_ephemeral_decommit_time = dd_time_clock(dd);
        gc_gen0_desired_high = 0;
    }
#endif //!MULTIPLE_HEAPS

    if (settings.condemned_generation >= (max_generation-1))
    {
        size_t new_slack_space =
#ifdef BIT64
                    max(min(min(soh_segment_size/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
#else
#ifdef FEATURE_CORECLR
                    dd_desired_allocation (dd);
#else
                    dd_max_size (dd);
#endif //FEATURE_CORECLR
#endif // BIT64

        slack_space = min (slack_space, new_slack_space);
    }

    decommit_heap_segment_pages (ephemeral_heap_segment, slack_space);

    gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
    current_gc_data_per_heap->extra_gen0_committed = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
}

//This is meant to be called by decide_on_compacting.

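// The fragmentation reported here is the sum of:
//  1) the space between the consing gen's allocation pointer and end on the
//     ephemeral segment (or the whole used part of the ephemeral segment if
//     the allocation pointer never reached it),
//  2) allocated - plan_allocated for every other segment of gen,
//  3) the free space recorded in front of each dequeued pinned plug.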
size_t gc_heap::generation_fragmentation (generation* gen,
                                          generation* consing_gen,
                                          uint8_t* end)
{
    size_t frag;
    uint8_t* alloc = generation_allocation_pointer (consing_gen);
    // If the allocation pointer has reached the ephemeral segment, count
    // the space between it and end; otherwise the whole used part of the
    // ephemeral segment is considered fragmentation.
    if (in_range_for_segment (alloc, ephemeral_heap_segment))
    {
        if (alloc <= heap_segment_allocated(ephemeral_heap_segment))
            frag = end - alloc;
        else
        {
            // case when no survivors, allocated set to beginning
            frag = 0;
        }
        dprintf (3, ("ephemeral frag: %Id", frag));
    }
    else
        frag = (heap_segment_allocated (ephemeral_heap_segment) -
                heap_segment_mem (ephemeral_heap_segment));
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(seg != NULL);

    while (seg != ephemeral_heap_segment)
    {
        frag += (heap_segment_allocated (seg) -
                 heap_segment_plan_allocated (seg));
        dprintf (3, ("seg: %Ix, frag: %Id", (size_t)seg,
                     (heap_segment_allocated (seg) -
                      heap_segment_plan_allocated (seg))));

        seg = heap_segment_next_rw (seg);
        assert (seg);
    }
    dprintf (3, ("frag: %Id discounting pinned plugs", frag));
    //add the length of the dequeued plug free space
    size_t bos = 0;
    while (bos < mark_stack_bos)
    {
        frag += (pinned_len (pinned_plug_of (bos)));
        bos++;
    }

    return frag;
}

// for SOH this returns the total sizes of the generation and its
// younger generation(s).
// for LOH this returns just LOH size.
size_t gc_heap::generation_sizes (generation* gen)
{
    size_t result = 0;
    if (generation_start_segment (gen) == ephemeral_heap_segment)
        result = (heap_segment_allocated (ephemeral_heap_segment) -
                  generation_allocation_start (gen));
    else
    {
        heap_segment* seg = heap_segment_in_range (generation_start_segment (gen));

        PREFIX_ASSUME(seg != NULL);

        while (seg)
        {
            result += (heap_segment_allocated (seg) -
                       heap_segment_mem (seg));
            seg = heap_segment_next_in_range (seg);
        }
    }

    return result;
}

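// Estimate how much space a GC of gen_number would free up:
//
//   total     = (desired_allocation - new_allocation) + current_size
//   survivors = total * surv        // last observed survival ratio
//   reclaim   = total - survivors + fragmentation
//
// Illustrative example (made-up numbers): 100MB total with a 30% survival
// rate and 5MB of fragmentation gives an estimated reclaim of 75MB.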
size_t gc_heap::estimated_reclaim (int gen_number)
{
    dynamic_data* dd = dynamic_data_of (gen_number);
    size_t gen_allocated = (dd_desired_allocation (dd) - dd_new_allocation (dd));
    size_t gen_total_size = gen_allocated + dd_current_size (dd);
    size_t est_gen_surv = (size_t)((float) (gen_total_size) * dd_surv (dd));
    size_t est_gen_free = gen_total_size - est_gen_surv + dd_fragmentation (dd);

    dprintf (GTC_LOG, ("h%d gen%d total size: %Id, est dead space: %Id (s: %d, allocated: %Id), frag: %Id",
                heap_number, gen_number,
                gen_total_size,
                est_gen_free,
                (int)(dd_surv (dd) * 100),
                gen_allocated,
                dd_fragmentation (dd)));

    return est_gen_free;
}

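// Decide whether this GC should compact. The basic heuristic compares the
// planned fragmentation and the fragmentation burden (fragmentation as a
// fraction of the generation's size) against the per-generation limits;
// a number of conditions (stress modes, forced/induced compaction, the last
// GC before an OOM, provisional mode, low ephemeral space, high memory load,
// no-GC regions) can force compaction regardless.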
BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
                                    size_t fragmentation,
                                    BOOL& should_expand)
{
    BOOL should_compact = FALSE;
    should_expand = FALSE;
    generation*   gen = generation_of (condemned_gen_number);
    dynamic_data* dd = dynamic_data_of (condemned_gen_number);
    size_t gen_sizes     = generation_sizes(gen);
    float  fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) :
                                    (float (fragmentation) / gen_sizes) );

    dprintf (GTC_LOG, ("h%d g%d fragmentation: %Id (%d%%)",
        heap_number, settings.condemned_generation,
        fragmentation, (int)(fragmentation_burden * 100.0)));

#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
    // for pure GC stress runs we need compaction, for GC stress "mix"
    // we need to ensure a better mix of compacting and sweeping collections
    if (GCStress<cfg_any>::IsEnabled() && !settings.concurrent
        && !g_pConfig->IsGCStressMix())
        should_compact = TRUE;

#ifdef GC_STATS
    // in GC stress "mix" mode, for stress induced collections make sure we
    // keep sweeps and compactions relatively balanced. do not (yet) force sweeps
    // against the GC's determination, as it may lead to premature OOMs.
    if (g_pConfig->IsGCStressMix() && settings.stress_induced)
    {
        int compactions = g_GCStatistics.cntCompactFGC+g_GCStatistics.cntCompactNGC;
        int sweeps = g_GCStatistics.cntFGC + g_GCStatistics.cntNGC - compactions;
        if (compactions < sweeps / 10)
        {
            should_compact = TRUE;
        }
    }
#endif // GC_STATS
#endif //defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)

    if (GCConfig::GetForceCompact())
        should_compact = TRUE;

    if ((condemned_gen_number == max_generation) && last_gc_before_oom)
    {
        should_compact = TRUE;
        last_gc_before_oom = FALSE;
        get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
    }

    if (settings.reason == reason_induced_compacting)
    {
        dprintf (2, ("induced compacting GC"));
        should_compact = TRUE;
        get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting);
    }

    if (settings.reason == reason_pm_full_gc)
    {
        assert (condemned_gen_number == max_generation);
        if (heap_number == 0)
        {
            dprintf (GTC_LOG, ("PM doing compacting full GC after a gen1"));
        }
        should_compact = TRUE;
    }

    dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%",
                fragmentation, (int) (100*fragmentation_burden)));

    if (provisional_mode_triggered && (condemned_gen_number == (max_generation - 1)))
    {
        dprintf (GTC_LOG, ("gen1 in PM always compact"));
        should_compact = TRUE;
    }

    if (!should_compact)
    {
        if (dt_low_ephemeral_space_p (tuning_deciding_compaction))
        {
            dprintf(GTC_LOG, ("compacting due to low ephemeral"));
            should_compact = TRUE;
            get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral);
        }
    }

    if (should_compact)
    {
        if ((condemned_gen_number >= (max_generation - 1)))
        {
            if (dt_low_ephemeral_space_p (tuning_deciding_expansion))
            {
                dprintf (GTC_LOG,("Not enough space for all ephemeral generations with compaction"));
                should_expand = TRUE;
            }
        }
    }

#ifdef BIT64
    BOOL high_memory = FALSE;
#endif // BIT64

    if (!should_compact)
    {
        // We are not putting this in dt_high_frag_p because it's not exactly
        // high fragmentation - it's just enough planned fragmentation for us to
        // want to compact. Also the "fragmentation" we are talking about here
        // is different from anywhere else.
        BOOL frag_exceeded = ((fragmentation >= dd_fragmentation_limit (dd)) &&
                                (fragmentation_burden >= dd_fragmentation_burden_limit (dd)));

        if (frag_exceeded)
        {
#ifdef BACKGROUND_GC
            // do not force compaction if this was a stress-induced GC
            IN_STRESS_HEAP(if (!settings.stress_induced))
            {
#endif // BACKGROUND_GC
            assert (settings.concurrent == FALSE);
            should_compact = TRUE;
            get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag);
#ifdef BACKGROUND_GC
            }
#endif // BACKGROUND_GC
        }

#ifdef BIT64
        // check for high memory situation
        if(!should_compact)
        {
            uint32_t num_heaps = 1;
#ifdef MULTIPLE_HEAPS
            num_heaps = gc_heap::n_heaps;
#endif // MULTIPLE_HEAPS

            ptrdiff_t reclaim_space = generation_size(max_generation) - generation_plan_size(max_generation);

            if((settings.entry_memory_load >= high_memory_load_th) && (settings.entry_memory_load < v_high_memory_load_th))
            {
                if(reclaim_space > (int64_t)(min_high_fragmentation_threshold (entry_available_physical_mem, num_heaps)))
                {
                    dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
                    should_compact = TRUE;
                    get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag);
                }
                high_memory = TRUE;
            }
            else if(settings.entry_memory_load >= v_high_memory_load_th)
            {
                if(reclaim_space > (ptrdiff_t)(min_reclaim_fragmentation_threshold (num_heaps)))
                {
                    dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
                    should_compact = TRUE;
                    get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag);
                }
                high_memory = TRUE;
            }
        }
#endif // BIT64
    }

    // The purpose of calling ensure_gap_allocation here is to make sure
    // that we actually are able to commit the memory to allocate generation
    // starts.
    if ((should_compact == FALSE) &&
        (ensure_gap_allocation (condemned_gen_number) == FALSE))
    {
        should_compact = TRUE;
        get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps);
    }

    if (settings.condemned_generation == max_generation)
    {
        //check the progress
        if (
#ifdef BIT64
            (high_memory && !should_compact) ||
#endif // BIT64
            (generation_plan_allocation_start (generation_of (max_generation - 1)) >=
                generation_allocation_start (generation_of (max_generation - 1))))
        {
            dprintf (1, ("gen1 start %Ix->%Ix, gen2 size %Id->%Id, lock elevation",
                    generation_allocation_start (generation_of (max_generation - 1)),
                    generation_plan_allocation_start (generation_of (max_generation - 1)),
                     generation_size (max_generation),
                     generation_plan_size (max_generation)));
            //no progress -> lock
            settings.should_lock_elevation = TRUE;
        }
    }

    if (settings.pause_mode == pause_no_gc)
    {
        should_compact = TRUE;
        if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_plan_allocated (ephemeral_heap_segment))
            < soh_allocation_no_gc)
        {
            should_expand = TRUE;
        }
    }

    dprintf (2, ("will %s(%s)", (should_compact ? "compact" : "sweep"), (should_expand ? "ex" : "")));
    return should_compact;
}

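// Round a size down to a multiple of 64 bytes.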
size_t align_lower_good_size_allocation (size_t size)
{
    return (size/64)*64;
}

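// A rough estimate of how much gen0 will want to allocate after this GC:
// two thirds of its desired allocation, but at least twice its minimum size.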
size_t gc_heap::approximate_new_allocation()
{
    dynamic_data* dd0 = dynamic_data_of (0);
    return max (2*dd_min_size (dd0), ((dd_desired_allocation (dd0)*2)/3));
}

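// Check whether [start, seg_end) can accommodate end_space_required. When a
// hard limit is in effect, the per-heap share of the remaining commit budget
// ((heap_hard_limit - current_total_committed) / num_heaps) also has to cover
// the requirement.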
BOOL gc_heap::sufficient_space_end_seg (uint8_t* start, uint8_t* seg_end, size_t end_space_required, gc_tuning_point tp)
{
    BOOL can_fit = FALSE;
    size_t end_seg_space = (size_t)(seg_end - start);
    if (end_seg_space > end_space_required)
    {
        // If a hard limit is specified, also check that what's left in the commit
        // budget (attributing all of it to the ephemeral seg, ie treating it as the
        // segment end) is enough for the required space.
        if (heap_hard_limit)
        {
            size_t left_in_commit = heap_hard_limit - current_total_committed;
            int num_heaps = 1;
#ifdef MULTIPLE_HEAPS
            num_heaps = n_heaps;
#endif //MULTIPLE_HEAPS
            left_in_commit /= num_heaps;
            if (left_in_commit > end_space_required)
            {
                can_fit = TRUE;
            }

            dprintf (2, ("h%d end seg %Id, but only %Id left in HARD LIMIT commit, required: %Id %s on eph (%d)",
                heap_number, end_seg_space,
                left_in_commit, end_space_required,
                (can_fit ? "ok" : "short"), (int)tp));
        }
        else
            can_fit = TRUE;
    }

    return can_fit;
}

// After we did a GC we expect to have at least this
// much space at the end of the segment to satisfy
// a reasonable amount of allocation requests.
size_t gc_heap::end_space_after_gc()
{
    return max ((dd_min_size (dynamic_data_of (0))/2), (END_SPACE_AFTER_GC + Align (min_obj_size)));
}

BOOL gc_heap::ephemeral_gen_fit_p (gc_tuning_point tp)
{
    uint8_t* start = 0;

    if ((tp == tuning_deciding_condemned_gen) ||
        (tp == tuning_deciding_compaction))
    {
        start = (settings.concurrent ? alloc_allocated : heap_segment_allocated (ephemeral_heap_segment));
        if (settings.concurrent)
        {
            dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (alloc_allocated)",
                (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
        }
        else
        {
            dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment (allocated)",
                (size_t)(heap_segment_reserved (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment))));
        }
    }
    else if (tp == tuning_deciding_expansion)
    {
        start = heap_segment_plan_allocated (ephemeral_heap_segment);
        dprintf (GTC_LOG, ("%Id left at the end of ephemeral segment based on plan",
            (size_t)(heap_segment_reserved (ephemeral_heap_segment) - start)));
    }
    else
    {
        assert (tp == tuning_deciding_full_gc);
        dprintf (GTC_LOG, ("FGC: %Id left at the end of ephemeral segment (alloc_allocated)",
            (size_t)(heap_segment_reserved (ephemeral_heap_segment) - alloc_allocated)));
        start = alloc_allocated;
    }

    if (start == 0) // empty ephemeral generations
    {
        assert (tp == tuning_deciding_expansion);
        // if there are no survivors in the ephemeral segment,
        // this should be the beginning of ephemeral segment.
        start = generation_allocation_pointer (generation_of (max_generation));
        assert (start == heap_segment_mem (ephemeral_heap_segment));
    }

    if (tp == tuning_deciding_expansion)
    {
        assert (settings.condemned_generation >= (max_generation-1));
        size_t gen0size = approximate_new_allocation();
        size_t eph_size = gen0size;
        size_t gen_min_sizes = 0;

        for (int j = 1; j <= max_generation-1; j++)
        {
            gen_min_sizes += 2*dd_min_size (dynamic_data_of(j));
        }

        eph_size += gen_min_sizes;

        dprintf (3, ("h%d deciding on expansion, need %Id (gen0: %Id, 2*min: %Id)",
            heap_number, gen0size, gen_min_sizes, eph_size));

        // We must find room for one large object and enough room for gen0size
        if ((size_t)(heap_segment_reserved (ephemeral_heap_segment) - start) > eph_size)
        {
            dprintf (3, ("Enough room before end of segment"));
            return TRUE;
        }
        else
        {
            size_t room = align_lower_good_size_allocation
                (heap_segment_reserved (ephemeral_heap_segment) - start);
            size_t end_seg = room;

            //look at the plug free space
            size_t largest_alloc = END_SPACE_AFTER_GC + Align (min_obj_size);
            bool large_chunk_found = FALSE;
            size_t bos = 0;
            uint8_t* gen0start = generation_plan_allocation_start (youngest_generation);
            dprintf (3, ("ephemeral_gen_fit_p: gen0 plan start: %Ix", (size_t)gen0start));
            if (gen0start == 0)
                return FALSE;
            dprintf (3, ("ephemeral_gen_fit_p: room before free list search %Id, needed: %Id",
                         room, gen0size));
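            // Walk the dequeued pinned plugs that fall in the planned gen0 and
            // add the free space in front of each one to room; we also look for
            // at least one chunk big enough for largest_alloc (otherwise we fall
            // back to checking the space left at the end of the segment).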
            while ((bos < mark_stack_bos) &&
                   !((room >= gen0size) && large_chunk_found))
            {
                uint8_t* plug = pinned_plug (pinned_plug_of (bos));
                if (in_range_for_segment (plug, ephemeral_heap_segment))
                {
                    if (plug >= gen0start)
                    {
                        size_t chunk = align_lower_good_size_allocation (pinned_len (pinned_plug_of (bos)));
                        room += chunk;
                        if (!large_chunk_found)
                        {
                            large_chunk_found = (chunk >= largest_alloc);
                        }
                        dprintf (3, ("ephemeral_gen_fit_p: room now %Id, large chunk: %Id",
                                     room, large_chunk_found));
                    }
                }
                bos++;
            }

            if (room >= gen0size)
            {
                if (large_chunk_found)
                {
                    sufficient_gen0_space_p = TRUE;

                    dprintf (3, ("Enough room"));
                    return TRUE;
                }
                else
                {
                    // now we need to find largest_alloc at the end of the segment.
                    if (end_seg >= end_space_after_gc())
                    {
                        dprintf (3, ("Enough room (may need end of seg)"));
                        return TRUE;
                    }
                }
            }

            dprintf (3, ("Not enough room"));
                return FALSE;
        }
    }
    else
    {
        size_t end_space = 0;
        dynamic_data* dd = dynamic_data_of (0);
        if ((tp == tuning_deciding_condemned_gen) ||
            (tp == tuning_deciding_full_gc))
        {
            end_space = max (2*dd_min_size (dd), end_space_after_gc());
        }
        else
        {
            assert (tp == tuning_deciding_compaction);
            end_space = approximate_new_allocation();
        }

        BOOL can_fit = sufficient_space_end_seg (start, heap_segment_reserved (ephemeral_heap_segment), end_space, tp);

        return can_fit;
    }
}

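// Allocate a large (LOH) object of jsize bytes. Returns NULL if the request
// exceeds the max object size or if more space cannot be obtained. If a
// background GC is in progress, the new object's mark array bit is adjusted
// so the BGC treats it correctly as a newly allocated object.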
CObjectHeader* gc_heap::allocate_large_object (size_t jsize, uint32_t flags, int64_t& alloc_bytes)
{
    //create a new alloc context because gen3context is shared.
    alloc_context acontext;
    acontext.init();

#ifdef BIT64
    size_t maxObjectSize = (INT64_MAX - 7 - Align(min_obj_size));
#else
    size_t maxObjectSize = (INT32_MAX - 7 - Align(min_obj_size));
#endif //BIT64

    if (jsize >= maxObjectSize)
    {
        if (GCConfig::GetBreakOnOOM())
        {
            GCToOSInterface::DebugBreak();
        }
        return NULL;
    }

    size_t size = AlignQword (jsize);
    int align_const = get_alignment_constant (FALSE);
#ifdef FEATURE_LOH_COMPACTION
    size_t pad = Align (loh_padding_obj_size, align_const);
#else
    size_t pad = 0;
#endif //FEATURE_LOH_COMPACTION

    assert (size >= Align (min_obj_size, align_const));
#ifdef _MSC_VER
#pragma inline_depth(0)
#endif //_MSC_VER
    if (! allocate_more_space (&acontext, (size + pad), flags, max_generation+1))
    {
        return 0;
    }

#ifdef _MSC_VER
#pragma inline_depth(20)
#endif //_MSC_VER

#ifdef MARK_ARRAY
    uint8_t* current_lowest_address = lowest_address;
    uint8_t* current_highest_address = highest_address;
#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        current_lowest_address = background_saved_lowest_address;
        current_highest_address = background_saved_highest_address;
    }
#endif //BACKGROUND_GC
#endif // MARK_ARRAY

#ifdef FEATURE_LOH_COMPACTION
    // The GC allocator made a free object already in this alloc context and
    // adjusted the alloc_ptr accordingly.
#endif //FEATURE_LOH_COMPACTION

    uint8_t*  result = acontext.alloc_ptr;

    assert ((size_t)(acontext.alloc_limit - acontext.alloc_ptr) == size);
    alloc_bytes += size;

    CObjectHeader* obj = (CObjectHeader*)result;

#ifdef MARK_ARRAY
    if (recursive_gc_sync::background_running_p())
    {
        if ((result < current_highest_address) && (result >= current_lowest_address))
        {
            dprintf (3, ("Clearing mark bit at address %Ix",
                     (size_t)(&mark_array [mark_word_of (result)])));

            mark_array_clear_marked (result);
        }
#ifdef BACKGROUND_GC
        //the object has to cover one full mark uint32_t
        assert (size > mark_word_size);
        if (current_c_gc_state != c_gc_state_free)
        {
            dprintf (3, ("Concurrent allocation of a large object %Ix",
                        (size_t)obj));
            //mark the new block specially so we know it is a new object
            if ((result < current_highest_address) && (result >= current_lowest_address))
            {
                dprintf (3, ("Setting mark bit at address %Ix",
                            (size_t)(&mark_array [mark_word_of (result)])));

                mark_array_set_marked (result);
            }
        }
#endif //BACKGROUND_GC
    }
#endif //MARK_ARRAY

    assert (obj != 0);
    assert ((size_t)obj == Align ((size_t)obj, align_const));

    return obj;
}

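// For free objects larger than 128KB, tell the OS (via
// GCToOSInterface::VirtualReset) that it may discard the pages making up the
// unused interior of the object. The part of the free object the GC still
// needs is skipped and only whole pages inside the object are reset.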
void reset_memory (uint8_t* o, size_t sizeo)
{
    if (sizeo > 128 * 1024)
    {
        // We cannot reset the part of a free object that the GC still uses (its header).
        size_t size_to_skip = min_free_list - plug_skew;

        size_t page_start = align_on_page ((size_t)(o + size_to_skip));
        size_t size = align_lower_page ((size_t)o + sizeo - size_to_skip - plug_skew) - page_start;
        // Note we need to compensate for an OS bug here. This bug would cause the MEM_RESET to fail
        // on write watched memory.
        if (reset_mm_p)
        {
#ifdef MULTIPLE_HEAPS
            bool unlock_p = true;
#else
            // We don't do unlock because there could be many processes using workstation GC and it's
            // bad perf to have many threads doing unlock at the same time.
            bool unlock_p = false;
#endif //MULTIPLE_HEAPS

            reset_mm_p = GCToOSInterface::VirtualReset((void*)page_start, size, unlock_p);
        }
    }
}

BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp)
{
    BOOL m = FALSE;
    // It shouldn't be necessary to do these comparisons because this is only used for blocking
    // GCs and LOH segments cannot be out of range.
    if ((o >= lowest_address) && (o < highest_address))
    {
        if (marked (o))
        {
            if (clearp)
            {
                clear_marked (o);
                if (pinned (o))
                    clear_pinned(o);
            }
            m = TRUE;
        }
        else
            m = FALSE;
    }
    else
        m = TRUE;
    return m;
}

void gc_heap::walk_survivors_relocation (void* profiling_context, record_surv_fn fn)
{
    // Now walk the portion of memory that is actually being relocated.
    walk_relocation (profiling_context, fn);

#ifdef FEATURE_LOH_COMPACTION
    if (loh_compacted_p)
    {
        walk_relocation_for_loh (profiling_context, fn);
    }
#endif //FEATURE_LOH_COMPACTION
}

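// Walk LOH and report each surviving plug (a run of consecutive marked
// objects) to the profiling callback.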
void gc_heap::walk_survivors_for_loh (void* profiling_context, record_surv_fn fn)
{
    generation* gen        = large_object_generation;
    heap_segment* seg      = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(seg != NULL);

    uint8_t* o                = generation_allocation_start (gen);
    uint8_t* plug_end         = o;
    uint8_t* plug_start       = o;

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next (seg);
            if (seg == 0)
                break;
            else
                o = heap_segment_mem (seg);
        }
        if (large_object_marked(o, FALSE))
        {
            plug_start = o;

            BOOL m = TRUE;
            while (m)
            {
                o = o + AlignQword (size (o));
                if (o >= heap_segment_allocated (seg))
                {
                    break;
                }
                m = large_object_marked (o, FALSE);
            }

            plug_end = o;

            fn (plug_start, plug_end, 0, profiling_context, false, false);
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
            {
                o = o + AlignQword (size (o));
            }
        }
    }
}

#ifdef BACKGROUND_GC

BOOL gc_heap::background_object_marked (uint8_t* o, BOOL clearp)
{
    BOOL m = FALSE;
    if ((o >= background_saved_lowest_address) && (o < background_saved_highest_address))
    {
        if (mark_array_marked (o))
        {
            if (clearp)
            {
                mark_array_clear_marked (o);
                //dprintf (3, ("mark array bit for object %Ix is cleared", o));
                dprintf (3, ("CM: %Ix", o));
            }
            m = TRUE;
        }
        else
            m = FALSE;
    }
    else
        m = TRUE;

    dprintf (3, ("o %Ix(%d) %s", o, size(o), (m ? "was bm" : "was NOT bm")));
    return m;
}

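// Free the LOH segments that background sweep flagged with
// heap_segment_flags_loh_delete; deletion was deferred to this point (the next
// EE suspension), see generation_delete_heap_segment for why.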
void gc_heap::background_delay_delete_loh_segments()
{
    generation* gen = large_object_generation;
    heap_segment* seg = heap_segment_rw (generation_start_segment (large_object_generation));
    heap_segment* prev_seg = 0;

    while (seg)
    {
        heap_segment* next_seg = heap_segment_next (seg);
        if (seg->flags & heap_segment_flags_loh_delete)
        {
            dprintf (3, ("deleting %Ix-%Ix-%Ix", (size_t)seg, heap_segment_allocated (seg), heap_segment_reserved (seg)));
            delete_heap_segment (seg, (GCConfig::GetRetainVM() != 0));
            heap_segment_next (prev_seg) = next_seg;
        }
        else
        {
            prev_seg = seg;
        }

        seg = next_seg;
    }
}

uint8_t* gc_heap::background_next_end (heap_segment* seg, BOOL large_objects_p)
{
    return
        (large_objects_p ? heap_segment_allocated (seg) : heap_segment_background_allocated (seg));
}

void gc_heap::set_mem_verify (uint8_t* start, uint8_t* end, uint8_t b)
{
#ifdef VERIFY_HEAP
    if (end > start)
    {
        if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
           !(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_NO_MEM_FILL))
        {
            dprintf (3, ("setting mem to %c [%Ix, [%Ix", b, start, end));
            memset (start, b, (end - start));
        }
    }
#endif //VERIFY_HEAP
}

void gc_heap::generation_delete_heap_segment (generation* gen,
                                              heap_segment* seg,
                                              heap_segment* prev_seg,
                                              heap_segment* next_seg)
{
    dprintf (3, ("bgc sweep: deleting seg %Ix", seg));
    if (gen == large_object_generation)
    {
        dprintf (3, ("Preparing empty large segment %Ix for deletion", (size_t)seg));

        // We cannot thread segs in here onto freeable_large_heap_segment because
        // grow_brick_card_tables could be committing mark array which needs to read
        // the seg list. So we delay it till next time we suspend EE.
        seg->flags |= heap_segment_flags_loh_delete;
        // Since we will be decommitting the seg, we need to prevent heap verification
        // from verifying this segment.
        heap_segment_allocated (seg) = heap_segment_mem (seg);
    }
    else
    {
        if (seg == ephemeral_heap_segment)
        {
            FATAL_GC_ERROR();
        }

        heap_segment_next (next_seg) = prev_seg;

        dprintf (3, ("Preparing empty small segment %Ix for deletion", (size_t)seg));
        heap_segment_next (seg) = freeable_small_heap_segment;
        freeable_small_heap_segment = seg;
    }

    decommit_heap_segment (seg);
    seg->flags |= heap_segment_flags_decommitted;

    set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);
}

void gc_heap::process_background_segment_end (heap_segment* seg,
                                          generation* gen,
                                          uint8_t* last_plug_end,
                                          heap_segment* start_seg,
                                          BOOL* delete_p)
{
    *delete_p = FALSE;
    uint8_t* allocated = heap_segment_allocated (seg);
    uint8_t* background_allocated = heap_segment_background_allocated (seg);
    BOOL loh_p = heap_segment_loh_p (seg);

    dprintf (3, ("Processing end of background segment [%Ix, %Ix[(%Ix[)",
                (size_t)heap_segment_mem (seg), background_allocated, allocated));

    if (!loh_p && (allocated != background_allocated))
    {
        assert (gen != large_object_generation);

        dprintf (3, ("Make a free object before newly promoted objects [%Ix, %Ix[",
                    (size_t)last_plug_end, background_allocated));
        thread_gap (last_plug_end, background_allocated - last_plug_end, generation_of (max_generation));


        fix_brick_to_highest (last_plug_end, background_allocated);

        // Since we allowed FGCs while going through the gaps, we could have erased
        // the brick that corresponds to bgc_allocated 'cause we had to update the
        // brick there; recover it here.
        fix_brick_to_highest (background_allocated, background_allocated);
    }
    else
    {
        // by default, if allocated == background_allocated, it can't
        // be the ephemeral segment.
        if (seg == ephemeral_heap_segment)
        {
            FATAL_GC_ERROR();
        }

        if (allocated == heap_segment_mem (seg))
        {
            // this can happen with LOH segments when multiple threads
            // allocate new segments and not all of them were needed to
            // satisfy allocation requests.
            assert (gen == large_object_generation);
        }

        if (last_plug_end == heap_segment_mem (seg))
        {
            dprintf (3, ("Segment allocated is %Ix (beginning of this seg) - %s be deleted",
                        (size_t)allocated, (*delete_p ? "should" : "should not")));

            if (seg != start_seg)
            {
                *delete_p = TRUE;
            }
        }
        else
        {
            dprintf (3, ("Trimming seg to %Ix[", (size_t)last_plug_end));
            heap_segment_allocated (seg) = last_plug_end;
            set_mem_verify (heap_segment_allocated (seg) - plug_skew, heap_segment_used (seg), 0xbb);

            decommit_heap_segment_pages (seg, 0);
        }
    }

    dprintf (3, ("verifying seg %Ix's mark array was completely cleared", seg));
    bgc_verify_mark_array_cleared (seg);
}

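// Walk the remaining segments of the generation (the ones bgc mark never saw)
// and delete any that turned out to be completely empty; read-only and
// non-empty segments are kept.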
void gc_heap::process_n_background_segments (heap_segment* seg,
                                             heap_segment* prev_seg,
                                             generation* gen)
{
    assert (gen != large_object_generation);

    while (seg)
    {
        dprintf (2, ("processing seg %Ix (not seen by bgc mark)", seg));
        heap_segment* next_seg = heap_segment_next (seg);

        if (heap_segment_read_only_p (seg))
        {
            prev_seg = seg;
        }
        else
        {
            if (heap_segment_allocated (seg) == heap_segment_mem (seg))
            {
                // This can happen - if we have a LOH segment where nothing survived
                // or a SOH segment allocated by a gen1 GC when BGC was going where
                // nothing survived last time we did a gen1 GC.
                generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
            }
            else
            {
                prev_seg = seg;
            }
        }

        verify_soh_segment_list();
        seg = next_seg;
    }
}

inline
BOOL gc_heap::fgc_should_consider_object (uint8_t* o,
                                          heap_segment* seg,
                                          BOOL consider_bgc_mark_p,
                                          BOOL check_current_sweep_p,
                                          BOOL check_saved_sweep_p)
{
    // the logic for this function must be kept in sync with the analogous function
    // in ToolBox\SOS\Strike\gc.cpp

    // TRUE means we don't need to check the bgc mark bit
    // FALSE means we do.
    BOOL no_bgc_mark_p = FALSE;

    if (consider_bgc_mark_p)
    {
        if (check_current_sweep_p && (o < current_sweep_pos))
        {
            dprintf (3, ("no bgc mark - o: %Ix < cs: %Ix", o, current_sweep_pos));
            no_bgc_mark_p = TRUE;
        }

        if (!no_bgc_mark_p)
        {
            if(check_saved_sweep_p && (o >= saved_sweep_ephemeral_start))
            {
                dprintf (3, ("no bgc mark - o: %Ix >= ss: %Ix", o, saved_sweep_ephemeral_start));
                no_bgc_mark_p = TRUE;
            }

            if (!check_saved_sweep_p)
            {
                uint8_t* background_allocated = heap_segment_background_allocated (seg);
                // if this was the saved ephemeral segment, check_saved_sweep_p
                // would've been true.
                assert (heap_segment_background_allocated (seg) != saved_sweep_ephemeral_start);
                // background_allocated could be 0 for the new segments acquired during bgc
                // sweep and we still want no_bgc_mark_p to be true.
                if (o >= background_allocated)
                {
                    dprintf (3, ("no bgc mark - o: %Ix >= ba: %Ix", o, background_allocated));
                    no_bgc_mark_p = TRUE;
                }
            }
        }
    }
    else
    {
        no_bgc_mark_p = TRUE;
    }

    dprintf (3, ("bgc mark %Ix: %s (bm: %s)", o, (no_bgc_mark_p ? "no" : "yes"), (background_object_marked (o, FALSE) ? "yes" : "no")));
    return (no_bgc_mark_p ? TRUE : background_object_marked (o, FALSE));
}

// consider_bgc_mark_p tells you if you need to care about the bgc mark bit at all
// if it's TRUE, check_current_sweep_p tells you if you should consider the
// current sweep position or not.
void gc_heap::should_check_bgc_mark (heap_segment* seg,
                                     BOOL* consider_bgc_mark_p,
                                     BOOL* check_current_sweep_p,
                                     BOOL* check_saved_sweep_p)
{
    // the logic for this function must be kept in sync with the analogous function
    // in ToolBox\SOS\Strike\gc.cpp
    *consider_bgc_mark_p = FALSE;
    *check_current_sweep_p = FALSE;
    *check_saved_sweep_p = FALSE;

    if (current_c_gc_state == c_gc_state_planning)
    {
        // We are doing the current_sweep_pos comparison here because we have yet to
        // turn on the swept flag for the segment but in_range_for_segment will return
        // FALSE if the address is the same as reserved.
        if ((seg->flags & heap_segment_flags_swept) || (current_sweep_pos == heap_segment_reserved (seg)))
        {
            dprintf (3, ("seg %Ix is already swept by bgc", seg));
        }
        else
        {
            *consider_bgc_mark_p = TRUE;

            dprintf (3, ("seg %Ix hasn't been swept by bgc", seg));

            if (seg == saved_sweep_ephemeral_seg)
            {
                dprintf (3, ("seg %Ix is the saved ephemeral seg", seg));
                *check_saved_sweep_p = TRUE;
            }

            if (in_range_for_segment (current_sweep_pos, seg))
            {
                dprintf (3, ("current sweep pos is %Ix and within seg %Ix",
                              current_sweep_pos, seg));
                *check_current_sweep_p = TRUE;
            }
        }
    }
}

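// Sweep the ephemeral generations using the background mark bits: walk the
// objects from the start of generation (max_generation - 1) to the end of the
// ephemeral segment and thread the gaps between marked plugs onto the
// generations' free lists (gen0's list is built on the side and published at
// the end, per the comment below).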
void gc_heap::background_ephemeral_sweep()
{
    dprintf (3, ("bgc ephemeral sweep"));

    int align_const = get_alignment_constant (TRUE);

    saved_sweep_ephemeral_seg = ephemeral_heap_segment;
    saved_sweep_ephemeral_start = generation_allocation_start (generation_of (max_generation - 1));

    // Since we don't want to interfere with gen0 allocation while we are threading gen0 free list,
    // we thread onto a list first then publish it when we are done.
    allocator youngest_free_list;
    size_t youngest_free_list_space = 0;
    size_t youngest_free_obj_space = 0;

    youngest_free_list.clear();

    for (int i = 0; i <= (max_generation - 1); i++)
    {
        generation* gen_to_reset = generation_of (i);
        assert (generation_free_list_space (gen_to_reset) == 0);
        // Can only assert free_list_space is 0, not free_obj_space as the allocator could have added
        // something there.
    }

    for (int i = (max_generation - 1); i >= 0; i--)
    {
        generation* current_gen = generation_of (i);
        uint8_t* o = generation_allocation_start (current_gen);
        //Skip the generation gap object
        o = o + Align(size (o), align_const);
        uint8_t* end = ((i > 0) ?
                     generation_allocation_start (generation_of (i - 1)) :
                     heap_segment_allocated (ephemeral_heap_segment));

        uint8_t* plug_end = o;
        uint8_t* plug_start = o;
        BOOL marked_p = FALSE;

        while (o < end)
        {
            marked_p = background_object_marked (o, TRUE);
            if (marked_p)
            {
                plug_start = o;
                size_t plug_size = plug_start - plug_end;

                if (i >= 1)
                {
                    thread_gap (plug_end, plug_size, current_gen);
                }
                else
                {
                    if (plug_size > 0)
                    {
                        make_unused_array (plug_end, plug_size);
                        if (plug_size >= min_free_list)
                        {
                            youngest_free_list_space += plug_size;
                            youngest_free_list.thread_item (plug_end, plug_size);
                        }
                        else
                        {
                            youngest_free_obj_space += plug_size;
                        }
                    }
                }

                fix_brick_to_highest (plug_end, plug_start);
                fix_brick_to_highest (plug_start, plug_start);

                BOOL m = TRUE;
                while (m)
                {
                    o = o + Align (size (o), align_const);
                    if (o >= end)
                    {
                        break;
                    }

                    m = background_object_marked (o, TRUE);
                }
                plug_end = o;
                dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
            }
            else
            {
                while ((o < end) && !background_object_marked (o, FALSE))
                {
                    o = o + Align (size (o), align_const);
                }
            }
        }

        if (plug_end != end)
        {
            if (i >= 1)
            {
                thread_gap (plug_end, end - plug_end, current_gen);
            }
            else
            {
                heap_segment_allocated (ephemeral_heap_segment) = plug_end;
                // the following line is temporary.
                heap_segment_saved_bg_allocated (ephemeral_heap_segment) = plug_end;
                make_unused_array (plug_end, (end - plug_end));
            }

            fix_brick_to_highest (plug_end, end);
        }

        dd_fragmentation (dynamic_data_of (i)) =
            generation_free_list_space (current_gen) + generation_free_obj_space (current_gen);
    }

    generation* youngest_gen = generation_of (0);
    generation_free_list_space (youngest_gen) = youngest_free_list_space;
    generation_free_obj_space (youngest_gen) = youngest_free_obj_space;
    dd_fragmentation (dynamic_data_of (0)) = youngest_free_list_space + youngest_free_obj_space;
    generation_allocator (youngest_gen)->copy_with_no_repair (&youngest_free_list);
}

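// The background sweep phase: reset the generations' free lists, restart the
// EE, sweep the ephemeral generations, then walk the gen2 segments (starting
// with the ephemeral segment and going backwards via heap_segment_prev)
// threading gaps between marked plugs onto the free list, and finally sweep
// LOH the same way. allow_fgc() is called periodically so foreground GCs can
// run while we sweep.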
void gc_heap::background_sweep()
{
    generation* gen         = generation_of (max_generation);
    dynamic_data* dd        = dynamic_data_of (max_generation);
    // For SOH segments we go backwards.
    heap_segment* start_seg = ephemeral_heap_segment;
    PREFIX_ASSUME(start_seg != NULL);
    heap_segment* fseg      = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
    heap_segment* seg       = start_seg;
    uint8_t* o                 = heap_segment_mem (seg);

    heap_segment* prev_seg = heap_segment_next (seg);
    int align_const        = get_alignment_constant (TRUE);
    if (seg == fseg)
    {
        assert (o == generation_allocation_start (generation_of (max_generation)));
        o = o + Align(size (o), align_const);
    }

    uint8_t* plug_end      = o;
    uint8_t* plug_start    = o;
    next_sweep_obj         = o;
    current_sweep_pos      = o;

    //uint8_t* end              = background_next_end (seg, (gen == large_object_generation));
    uint8_t* end              = heap_segment_background_allocated (seg);
    BOOL delete_p          = FALSE;

    //concurrent_print_time_delta ("finished with mark and start with sweep");
    concurrent_print_time_delta ("Sw");
    dprintf (2, ("---- (GC%d)Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));

    //block concurrent allocation for large objects
    dprintf (3, ("lh state: planning"));
    if (gc_lh_block_event.IsValid())
    {
        gc_lh_block_event.Reset();
    }

    for (int i = 0; i <= (max_generation + 1); i++)
    {
        generation* gen_to_reset = generation_of (i);
        generation_allocator (gen_to_reset)->clear();
        generation_free_list_space (gen_to_reset) = 0;
        generation_free_obj_space (gen_to_reset) = 0;
        generation_free_list_allocated (gen_to_reset) = 0;
        generation_end_seg_allocated (gen_to_reset) = 0;
        generation_condemned_allocated (gen_to_reset) = 0;
        generation_sweep_allocated (gen_to_reset) = 0;
        //reset the allocation so foreground gc can allocate into older generation
        generation_allocation_pointer (gen_to_reset) = 0;
        generation_allocation_limit (gen_to_reset) = 0;
        generation_allocation_segment (gen_to_reset) = heap_segment_rw (generation_start_segment (gen_to_reset));
    }

    FIRE_EVENT(BGC2ndNonConEnd);

    loh_alloc_thread_count = 0;
    current_bgc_state = bgc_sweep_soh;
    verify_soh_segment_list();

#ifdef FEATURE_BASICFREEZE
    if ((generation_start_segment (gen) != ephemeral_heap_segment) &&
        ro_segments_in_range)
    {
        sweep_ro_segments (generation_start_segment (gen));
    }
#endif // FEATURE_BASICFREEZE

    //TODO BACKGROUND_GC: can we move this to where we switch to the LOH?
    if (current_c_gc_state != c_gc_state_planning)
    {
        current_c_gc_state = c_gc_state_planning;
    }

    concurrent_print_time_delta ("Swe");

    heap_segment* loh_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation + 1)));
    PREFIX_ASSUME(loh_seg != NULL);
    while (loh_seg)
    {
        loh_seg->flags &= ~heap_segment_flags_swept;
        heap_segment_background_allocated (loh_seg) = heap_segment_allocated (loh_seg);
        loh_seg = heap_segment_next_rw (loh_seg);
    }

#ifdef MULTIPLE_HEAPS
    bgc_t_join.join(this, gc_join_restart_ee);
    if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
#ifdef MULTIPLE_HEAPS
        dprintf(2, ("Starting BGC threads for resuming EE"));
        bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    if (heap_number == 0)
    {
#ifdef BGC_SERVO_TUNING
        get_and_reset_loh_alloc_info();
#endif //BGC_SERVO_TUNING
        restart_EE ();
    }

    FIRE_EVENT(BGC2ndConBegin);

    background_ephemeral_sweep();

    concurrent_print_time_delta ("Swe eph");

#ifdef MULTIPLE_HEAPS
    bgc_t_join.join(this, gc_join_after_ephemeral_sweep);
    if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
#ifdef FEATURE_EVENT_TRACE
        bgc_heap_walk_for_etw_p = GCEventStatus::IsEnabled(GCEventProvider_Default,
                                                           GCEventKeyword_GCHeapSurvivalAndMovement,
                                                           GCEventLevel_Information);
#endif //FEATURE_EVENT_TRACE

        leave_spin_lock (&gc_lock);

#ifdef MULTIPLE_HEAPS
        dprintf(2, ("Starting BGC threads for BGC sweeping"));
        bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    disable_preemptive (true);

    dprintf (2, ("bgs: sweeping gen2 objects"));
    dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
                    (size_t)heap_segment_mem (seg),
                    (size_t)heap_segment_allocated (seg),
                    (size_t)heap_segment_background_allocated (seg)));

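    // Publish current_sweep_pos and call allow_fgc() every num_objs objects so
    // a foreground GC can run while we sweep.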
    int num_objs = 256;
    int current_num_objs = 0;
    heap_segment* next_seg = 0;

    while (1)
    {
        if (o >= end)
        {
            if (gen == large_object_generation)
            {
                next_seg = heap_segment_next (seg);
            }
            else
            {
                next_seg = heap_segment_prev (fseg, seg);
            }

            delete_p = FALSE;

            if (!heap_segment_read_only_p (seg))
            {
                if (gen == large_object_generation)
                {
                    // we can treat all LOH segments as in the bgc domain
                    // regardless of whether we saw in bgc mark or not
                    // because we don't allow LOH allocations during bgc
                    // sweep anyway - the LOH segments can't change.
                    process_background_segment_end (seg, gen, plug_end,
                                                    start_seg, &delete_p);
                }
                else
                {
                    assert (heap_segment_background_allocated (seg) != 0);
                    process_background_segment_end (seg, gen, plug_end,
                                                    start_seg, &delete_p);

                    assert (next_seg || !delete_p);
                }
            }

            if (delete_p)
            {
                generation_delete_heap_segment (gen, seg, prev_seg, next_seg);
            }
            else
            {
                prev_seg = seg;
                dprintf (2, ("seg %Ix has been swept", seg));
                seg->flags |= heap_segment_flags_swept;
            }

            verify_soh_segment_list();

            seg = next_seg;

            dprintf (GTC_LOG, ("seg: %Ix, next_seg: %Ix, prev_seg: %Ix", seg, next_seg, prev_seg));

            if (seg == 0)
            {
                generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));

                PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);

                if (gen != large_object_generation)
                {
                    dprintf (2, ("bgs: sweeping gen3 objects"));
                    concurrent_print_time_delta ("Swe SOH");
                    FIRE_EVENT(BGC1stSweepEnd, 0);

                    enter_spin_lock (&more_space_lock_loh);
                    add_saved_spinlock_info (true, me_acquire, mt_bgc_loh_sweep);

                    concurrent_print_time_delta ("Swe LOH took msl");

                    // We wait till all allocating threads are completely done.
                    int spin_count = yp_spin_count_unit;
                    while (loh_alloc_thread_count)
                    {
                        spin_and_switch (spin_count, (loh_alloc_thread_count == 0));
                    }

                    current_bgc_state = bgc_sweep_loh;
                    gen = generation_of (max_generation+1);
                    start_seg = heap_segment_rw (generation_start_segment (gen));

                    PREFIX_ASSUME(start_seg != NULL);

                    seg = start_seg;
                    prev_seg = 0;
                    o = generation_allocation_start (gen);
                    assert (method_table (o) == g_gc_pFreeObjectMethodTable);
                    align_const = get_alignment_constant (FALSE);
                    o = o + Align(size (o), align_const);
                    plug_end = o;
                    end = heap_segment_allocated (seg);
                    dprintf (2, ("sweeping gen3 objects"));
                    generation_free_obj_space (gen) = 0;
                    generation_allocator (gen)->clear();
                    generation_free_list_space (gen) = 0;

                    dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
                                    (size_t)heap_segment_mem (seg),
                                    (size_t)heap_segment_allocated (seg),
                                    (size_t)heap_segment_background_allocated (seg)));
                }
                else
                    break;
            }
            else
            {
                o = heap_segment_mem (seg);
                if (seg == fseg)
                {
                    assert (gen != large_object_generation);
                    assert (o == generation_allocation_start (generation_of (max_generation)));
                    align_const = get_alignment_constant (TRUE);
                    o = o + Align(size (o), align_const);
                }

                plug_end = o;
                current_sweep_pos = o;
                next_sweep_obj = o;

                allow_fgc();
                end = background_next_end (seg, (gen == large_object_generation));
                dprintf (2, ("bgs: seg: %Ix, [%Ix, %Ix[%Ix", (size_t)seg,
                                (size_t)heap_segment_mem (seg),
                                (size_t)heap_segment_allocated (seg),
                                (size_t)heap_segment_background_allocated (seg)));
            }
        }

        if ((o < end) && background_object_marked (o, TRUE))
        {
            plug_start = o;
            if (gen == large_object_generation)
            {
                dprintf (2, ("loh fr: [%Ix-%Ix[(%Id)", plug_end, plug_start, plug_start-plug_end));
            }

            thread_gap (plug_end, plug_start-plug_end, gen);
            if (gen != large_object_generation)
            {
                add_gen_free (max_generation, plug_start-plug_end);
                fix_brick_to_highest (plug_end, plug_start);
                // we need to fix the brick for the next plug here 'cause an FGC can
                // happen and it can't read a stale brick.
                fix_brick_to_highest (plug_start, plug_start);
            }

            BOOL m = TRUE;

            while (m)
            {
                next_sweep_obj = o + Align(size (o), align_const);
                current_num_objs++;
                if (current_num_objs >= num_objs)
                {
                    current_sweep_pos = next_sweep_obj;

                    allow_fgc();
                    current_num_objs = 0;
                }

                o = next_sweep_obj;
                if (o >= end)
                {
                    break;
                }

                m = background_object_marked (o, TRUE);
            }
            plug_end = o;
            if (gen != large_object_generation)
            {
                add_gen_plug (max_generation, plug_end-plug_start);
                dd_survived_size (dd) += (plug_end - plug_start);
            }
            dprintf (3, ("bgs: plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
        }
        else
        {
            while ((o < end) && !background_object_marked (o, FALSE))
            {
                next_sweep_obj = o + Align(size (o), align_const);
                current_num_objs++;
                if (current_num_objs >= num_objs)
                {
                    current_sweep_pos = plug_end;
                    dprintf (1234, ("f: swept till %Ix", current_sweep_pos));
                    allow_fgc();
                    current_num_objs = 0;
                }

                o = next_sweep_obj;
            }
        }
    }

    size_t total_loh_size = generation_size (max_generation + 1);
    size_t total_soh_size = generation_sizes (generation_of (max_generation));

    dprintf (GTC_LOG, ("h%d: S: loh: %Id, soh: %Id", heap_number, total_loh_size, total_soh_size));

    dprintf (GTC_LOG, ("end of bgc sweep: gen2 FL: %Id, FO: %Id",
        generation_free_list_space (generation_of (max_generation)),
        generation_free_obj_space (generation_of (max_generation))));
    dprintf (GTC_LOG, ("h%d: end of bgc sweep: gen3 FL: %Id, FO: %Id",
        heap_number,
        generation_free_list_space (generation_of (max_generation + 1)),
        generation_free_obj_space (generation_of (max_generation + 1))));

    FIRE_EVENT(BGC2ndConEnd);
    concurrent_print_time_delta ("background sweep");

    heap_segment* reset_seg = heap_segment_rw (generation_start_segment (generation_of (max_generation)));
    PREFIX_ASSUME(reset_seg != NULL);

    while (reset_seg)
    {
        heap_segment_saved_bg_allocated (reset_seg) = heap_segment_background_allocated (reset_seg);
        heap_segment_background_allocated (reset_seg) = 0;
        reset_seg = heap_segment_next_rw (reset_seg);
    }

    generation* loh_gen = generation_of (max_generation + 1);
    generation_allocation_segment (loh_gen) = heap_segment_rw (generation_start_segment (loh_gen));

    // We calculate dynamic data here because if we wait till we signal the lh event,
    // the allocation thread can change the fragmentation and we may read an intermediate
    // value (which can be greater than the generation size). Plus by that time it won't
    // be accurate.
    compute_new_dynamic_data (max_generation);

    enable_preemptive ();

#ifdef MULTIPLE_HEAPS
    bgc_t_join.join(this, gc_join_set_state_free);
    if (bgc_t_join.joined())
#endif //MULTIPLE_HEAPS
    {
        // TODO: We are using this join just to set the state. Should
        // look into eliminating it - check to make sure things that use
        // this state can live with per heap state like should_check_bgc_mark.
        current_c_gc_state = c_gc_state_free;

#ifdef BGC_SERVO_TUNING
        if (bgc_tuning::enable_fl_tuning)
        {
            enter_spin_lock (&gc_lock);
            bgc_tuning::record_and_adjust_bgc_end();
            leave_spin_lock (&gc_lock);
        }
#endif //BGC_SERVO_TUNING

#ifdef MULTIPLE_HEAPS
        dprintf(2, ("Starting BGC threads after background sweep phase"));
        bgc_t_join.restart();
#endif //MULTIPLE_HEAPS
    }

    disable_preemptive (true);

    if (gc_lh_block_event.IsValid())
    {
        gc_lh_block_event.Set();
    }

    add_saved_spinlock_info (true, me_release, mt_bgc_loh_sweep);
    leave_spin_lock (&more_space_lock_loh);

    //dprintf (GTC_LOG, ("---- (GC%d)End Background Sweep Phase ----", VolatileLoad(&settings.gc_index)));
    dprintf (GTC_LOG, ("---- (GC%d)ESw ----", VolatileLoad(&settings.gc_index)));
}
#endif //BACKGROUND_GC

void gc_heap::sweep_large_objects ()
{
    //this min value is for the sake of the dynamic tuning.
    //so we know that we are not starting even if we have no
    //survivors.
    generation* gen        = large_object_generation;
    heap_segment* start_seg = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(start_seg != NULL);

    heap_segment* seg      = start_seg;
    heap_segment* prev_seg = 0;
    uint8_t* o             = generation_allocation_start (gen);
    int align_const        = get_alignment_constant (FALSE);

    //Skip the generation gap object
    o = o + Align(size (o), align_const);

    uint8_t* plug_end         = o;
    uint8_t* plug_start       = o;

    generation_allocator (gen)->clear();
    generation_free_list_space (gen) = 0;
    generation_free_obj_space (gen) = 0;


    dprintf (3, ("sweeping large objects"));
    dprintf (3, ("seg: %Ix, [%Ix, %Ix[, starting from %Ix",
                 (size_t)seg,
                 (size_t)heap_segment_mem (seg),
                 (size_t)heap_segment_allocated (seg),
                 o));

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            heap_segment* next_seg = heap_segment_next (seg);
            //delete the empty segment if not the only one
            if ((plug_end == heap_segment_mem (seg)) &&
                (seg != start_seg) && !heap_segment_read_only_p (seg))
            {
                //prepare for deletion
                dprintf (3, ("Preparing empty large segment %Ix", (size_t)seg));
                assert (prev_seg);
                heap_segment_next (prev_seg) = next_seg;
                heap_segment_next (seg) = freeable_large_heap_segment;
                freeable_large_heap_segment = seg;
            }
            else
            {
                if (!heap_segment_read_only_p (seg))
                {
                    dprintf (3, ("Trimming seg to %Ix[", (size_t)plug_end));
                    heap_segment_allocated (seg) = plug_end;
                    decommit_heap_segment_pages (seg, 0);
                }
                prev_seg = seg;
            }
            seg = next_seg;
            if (seg == 0)
                break;
            else
            {
                o = heap_segment_mem (seg);
                plug_end = o;
                dprintf (3, ("seg: %Ix, [%Ix, %Ix[", (size_t)seg,
                             (size_t)heap_segment_mem (seg),
                             (size_t)heap_segment_allocated (seg)));
            }
        }
        if (large_object_marked(o, TRUE))
        {
            plug_start = o;
            //everything between plug_end and plug_start is free
            thread_gap (plug_end, plug_start-plug_end, gen);

            BOOL m = TRUE;
            while (m)
            {
                o = o + AlignQword (size (o));
                if (o >= heap_segment_allocated (seg))
                {
                    break;
                }
                m = large_object_marked (o, TRUE);
            }
            plug_end = o;
            dprintf (3, ("plug [%Ix, %Ix[", (size_t)plug_start, (size_t)plug_end));
        }
        else
        {
            while (o < heap_segment_allocated (seg) && !large_object_marked(o, FALSE))
            {
                o = o + AlignQword (size (o));
            }
        }
    }

    generation_allocation_segment (gen) = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(generation_allocation_segment(gen) != NULL);
}

void gc_heap::relocate_in_large_objects ()
{
    relocate_args args;
    args.low = gc_low;
    args.high = gc_high;
    args.last_plug = 0;

    generation* gen = large_object_generation;

    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

    PREFIX_ASSUME(seg != NULL);

    uint8_t* o = generation_allocation_start (gen);

    while (1)
    {
        if (o >= heap_segment_allocated (seg))
        {
            seg = heap_segment_next_rw (seg);
            if (seg == 0)
                break;
            else
            {
                o = heap_segment_mem (seg);
            }
        }
        while (o < heap_segment_allocated (seg))
        {
            check_class_object_demotion (o);
            if (contain_pointers (o))
            {
                dprintf(3, ("Relocating through large object %Ix", (size_t)o));
                go_through_object_nostart (method_table (o), o, size(o), pval,
                        {
                            reloc_survivor_helper (pval);
                        });
            }
            o = o + AlignQword (size (o));
        }
    }
}

void gc_heap::mark_through_cards_for_large_objects (card_fn fn,
                                                    BOOL relocating
                                                    CARD_MARKING_STEALING_ARG(gc_heap* hpt))
{
    uint8_t*      low               = gc_low;
    size_t        end_card          = 0;
    generation*   oldest_gen        = generation_of (max_generation+1);
    heap_segment* seg               = heap_segment_rw (generation_start_segment (oldest_gen));

    PREFIX_ASSUME(seg != NULL);

    uint8_t*      beg               = generation_allocation_start (oldest_gen);
    uint8_t*      end               = heap_segment_allocated (seg);

    size_t  cg_pointers_found = 0;

    size_t  card_word_end = (card_of (align_on_card_word (end)) /
                             card_word_width);

    size_t      n_eph             = 0;
    size_t      n_gen             = 0;
    size_t      n_card_set        = 0;
    uint8_t*    next_boundary = (relocating ?
                              generation_plan_allocation_start (generation_of (max_generation -1)) :
                              ephemeral_low);

    uint8_t*    nhigh         = (relocating ?
                              heap_segment_plan_allocated (ephemeral_heap_segment) :
                              ephemeral_high);

    BOOL          foundp            = FALSE;
    uint8_t*      start_address     = 0;
    uint8_t*      limit             = 0;
    size_t        card              = card_of (beg);
    uint8_t*      o                 = beg;
#ifdef BACKGROUND_GC
    BOOL consider_bgc_mark_p        = FALSE;
    BOOL check_current_sweep_p      = FALSE;
    BOOL check_saved_sweep_p        = FALSE;
    should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC

    size_t total_cards_cleared = 0;

#ifdef FEATURE_CARD_MARKING_STEALING
    card_marking_enumerator card_mark_enumerator(seg, low, (VOLATILE(uint32_t)*) & card_mark_chunk_index_loh);
    card_word_end = 0;
#endif // FEATURE_CARD_MARKING_STEALING

    //dprintf(3,( "scanning large objects from %Ix to %Ix", (size_t)beg, (size_t)end));
    dprintf(3, ("CMl: %Ix->%Ix", (size_t)beg, (size_t)end));
    while (1)
    {
        if ((o < end) && (card_of(o) > card))
        {
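            // o has crossed onto a new card. If no cross-generation pointers were
            // found while walking the previous card(s), clear those cards so they
            // are not rescanned, then continue from the card that covers o.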
            dprintf (3, ("Found %Id cg pointers", cg_pointers_found));
            if (cg_pointers_found == 0)
            {
                uint8_t* last_object_processed = o;
#ifdef FEATURE_CARD_MARKING_STEALING
                last_object_processed = min(limit, o);
#endif // FEATURE_CARD_MARKING_STEALING
                dprintf (3, (" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card), (size_t)last_object_processed));
                clear_cards (card, card_of((uint8_t*)last_object_processed));
                total_cards_cleared += (card_of((uint8_t*)last_object_processed) - card);
            }
            n_eph +=cg_pointers_found;
            cg_pointers_found = 0;
            card = card_of ((uint8_t*)o);
        }
        if ((o < end) &&(card >= end_card))
        {
#ifdef FEATURE_CARD_MARKING_STEALING
            // find another chunk with some cards set
            foundp = find_next_chunk(card_mark_enumerator, seg, n_card_set, start_address, limit, card, end_card, card_word_end);
#else // FEATURE_CARD_MARKING_STEALING
            foundp = find_card (card_table, card, card_word_end, end_card);
            if (foundp)
            {
                n_card_set+= end_card - card;
                start_address = max (beg, card_address (card));
            }
            limit = min (end, card_address (end_card));
#endif  // FEATURE_CARD_MARKING_STEALING
        }
        if ((!foundp) || (o >= end) || (card_address (card) >= end))
        {
            if ((foundp) && (cg_pointers_found == 0))
            {
                dprintf(3,(" Clearing cards [%Ix, %Ix[ ", (size_t)card_address(card),
                           (size_t)card_address(card+1)));
                clear_cards (card, card+1);
                total_cards_cleared += 1;
            }
            n_eph +=cg_pointers_found;
            cg_pointers_found = 0;
#ifdef FEATURE_CARD_MARKING_STEALING
            // we have decided to move to the next segment - make sure we exhaust the chunk enumerator for this segment
            card_mark_enumerator.exhaust_segment(seg);
#endif // FEATURE_CARD_MARKING_STEALING
            if ((seg = heap_segment_next_rw (seg)) != 0)
            {
#ifdef BACKGROUND_GC
                should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC
                beg = heap_segment_mem (seg);
                end = compute_next_end (seg, low);
#ifdef FEATURE_CARD_MARKING_STEALING
                card_word_end = 0;
#else // FEATURE_CARD_MARKING_STEALING
                card_word_end = card_of (align_on_card_word (end)) / card_word_width;
#endif // FEATURE_CARD_MARKING_STEALING
                card = card_of (beg);
                o  = beg;
                end_card = 0;
                continue;
            }
            else
            {
                break;
            }
        }

        assert (card_set_p (card));
        {
            dprintf(3,("card %Ix: o: %Ix, l: %Ix[ ",
                       card, (size_t)o, (size_t)limit));

            assert (Align (size (o)) >= Align (min_obj_size));
            size_t s = size (o);
            uint8_t* next_o =  o + AlignQword (s);
            Prefetch (next_o);

            while (o < limit)
            {
                s = size (o);
                assert (Align (s) >= Align (min_obj_size));
                next_o =  o + AlignQword (s);
                Prefetch (next_o);

                dprintf (4, ("|%Ix|", (size_t)o));
                if (next_o < start_address)
                {
                    goto end_object;
                }

#ifdef BACKGROUND_GC
                if (!fgc_should_consider_object (o, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p))
                {
                    goto end_object;
                }
#endif //BACKGROUND_GC

#ifdef COLLECTIBLE_CLASS
                if (is_collectible(o))
                {
                    BOOL passed_end_card_p = FALSE;

                    if (card_of (o) > card)
                    {
                        passed_end_card_p = card_transition (o, end, card_word_end,
                            cg_pointers_found,
                            n_eph, n_card_set,
                            card, end_card,
                            foundp, start_address,
                            limit, total_cards_cleared
                            CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end));
                    }

                    if ((!passed_end_card_p || foundp) && (card_of (o) == card))
                    {
                        // card is valid and it covers the head of the object
                        if (fn == &gc_heap::relocate_address)
                        {
                            keep_card_live (o, n_gen, cg_pointers_found);
                        }
                        else
                        {
                            uint8_t* class_obj = get_class_object (o);
                            mark_through_cards_helper (&class_obj, n_gen,
                                                       cg_pointers_found, fn,
                                                       nhigh, next_boundary CARD_MARKING_STEALING_ARG(hpt));
                        }
                    }

                    if (passed_end_card_p)
                    {
                        if (foundp && (card_address (card) < next_o))
                        {
                            goto go_through_refs;
                        }
                        else
                        {
                            goto end_object;
                        }
                    }
                }

go_through_refs:
#endif //COLLECTIBLE_CLASS

                if (contain_pointers (o))
                {
                    dprintf(3,("Going through %Ix", (size_t)o));

                    go_through_object (method_table(o), o, s, poo,
                                       start_address, use_start, (o + s),
                       {
                           if (card_of ((uint8_t*)poo) > card)
                           {
                                BOOL passed_end_card_p  = card_transition ((uint8_t*)poo, end,
                                        card_word_end,
                                        cg_pointers_found,
                                        n_eph, n_card_set,
                                        card, end_card,
                                        foundp, start_address,
                                        limit, total_cards_cleared
                                        CARD_MARKING_STEALING_ARGS(card_mark_enumerator, seg, card_word_end));

                                if (passed_end_card_p)
                                {
                                    if (foundp && (card_address (card) < next_o))
                                    {
                                        //new_start();
                                        {
                                            if (ppstop <= (uint8_t**)start_address)
                                            {break;}
                                            else if (poo < (uint8_t**)start_address)
                                            {poo = (uint8_t**)start_address;}
                                        }
                                    }
                                    else
                                    {
                                        goto end_object;
                                    }
                                }
                            }

                           mark_through_cards_helper (poo, n_gen,
                                                      cg_pointers_found, fn,
                                                      nhigh, next_boundary CARD_MARKING_STEALING_ARG(hpt));
                       }
                        );
                }

            end_object:
                o = next_o;
            }

        }
    }

    // compute the efficiency ratio of the card table
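    // generation_skip_ratio is the percentage of cross-generation pointers we
    // visited (n_eph) that turned out to be useful (n_gen). With a small sample
    // (n_eph <= 800) we leave the ratio at 100 so we don't draw conclusions from
    // too little data; the min means the ratio can only stay the same or shrink
    // here, never grow.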
    if (!relocating)
    {
        generation_skip_ratio = min (((n_eph > 800) ?
                                      (int)(((float)n_gen / (float)n_eph) * 100) : 100),
                                     generation_skip_ratio);

        dprintf (3, ("Mloh: cross: %Id, useful: %Id, cards cleared: %Id, cards set: %Id, ratio: %d",
             n_eph, n_gen, total_cards_cleared, n_card_set, generation_skip_ratio));
    }
    else
    {
        dprintf (3, ("R: Mloh: cross: %Id, useful: %Id, cards set: %Id, ratio: %d",
             n_eph, n_gen, n_card_set, generation_skip_ratio));
    }
}

void gc_heap::descr_segment (heap_segment* seg )
{
#ifdef TRACE_GC
    uint8_t*  x = heap_segment_mem (seg);
    while (x < heap_segment_allocated (seg))
    {
        dprintf(2, ( "%Ix: %d ", (size_t)x, size (x)));
        x = x + Align(size (x));
    }
#else // TRACE_GC
    UNREFERENCED_PARAMETER(seg);
#endif // TRACE_GC
}

void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context)
{
#ifdef MULTIPLE_HEAPS
    int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
    for (int i = 0; i < n_heaps; i++)
    {
        gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = NULL;
#ifdef _PREFAST_
        // prefix complains about us dereferencing hp in wks build even though we only access static members
        // this way. not sure how to shut it up except for this ugly workaround:
        PREFIX_ASSUME(hp != NULL);
#endif // _PREFAST_
#endif //MULTIPLE_HEAPS

        int curr_gen_number0 = max_generation+1;
        while (curr_gen_number0 >= 0)
        {
            generation* gen = hp->generation_of (curr_gen_number0);
            heap_segment* seg = generation_start_segment (gen);
            while (seg && (seg != hp->ephemeral_heap_segment))
            {
                assert (curr_gen_number0 > 0);

                // report bounds from heap_segment_mem (seg) to
                // heap_segment_allocated (seg);
                // for generation # curr_gen_number0
                // for heap # heap_no

                fn(context, curr_gen_number0, heap_segment_mem (seg),
                                              heap_segment_allocated (seg),
                                              curr_gen_number0 == max_generation+1 ? heap_segment_reserved (seg) : heap_segment_allocated (seg));

                seg = heap_segment_next (seg);
            }
            if (seg)
            {
                assert (seg == hp->ephemeral_heap_segment);
                assert (curr_gen_number0 <= max_generation);
                //
                if (curr_gen_number0 == max_generation)
                {
                    if (heap_segment_mem (seg) < generation_allocation_start (hp->generation_of (max_generation-1)))
                    {
                        // report bounds from heap_segment_mem (seg) to
                        // generation_allocation_start (generation_of (max_generation-1))
                        // for heap # heap_number

                        fn(context, curr_gen_number0, heap_segment_mem (seg),
                                                      generation_allocation_start (hp->generation_of (max_generation-1)),
                                                      generation_allocation_start (hp->generation_of (max_generation-1)) );
                    }
                }
                else if (curr_gen_number0 != 0)
                {
                    //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
                    // to generation_allocation_start (generation_of (curr_gen_number0-1))
                    // for heap # heap_number

                    fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
                                                  generation_allocation_start (hp->generation_of (curr_gen_number0-1)),
                                                  generation_allocation_start (hp->generation_of (curr_gen_number0-1)));
                }
                else
                {
                    //report bounds from generation_allocation_start (generation_of (curr_gen_number0))
                    // to heap_segment_allocated (ephemeral_heap_segment);
                    // for heap # heap_number

                    fn(context, curr_gen_number0, generation_allocation_start (hp->generation_of (curr_gen_number0)),
                                                  heap_segment_allocated (hp->ephemeral_heap_segment),
                                                  heap_segment_reserved (hp->ephemeral_heap_segment) );
                }
            }
            curr_gen_number0--;
        }
    }
}

#ifdef TRACE_GC
// Note that when logging is on it can take a long time to go through the free items.
void gc_heap::print_free_list (int gen, heap_segment* seg)
{
    UNREFERENCED_PARAMETER(gen);
    UNREFERENCED_PARAMETER(seg);
/*
    if (settings.concurrent == FALSE)
    {
        uint8_t* seg_start = heap_segment_mem (seg);
        uint8_t* seg_end = heap_segment_allocated (seg);

        dprintf (3, ("Free list in seg %Ix:", seg_start));

        size_t total_free_item = 0;

        allocator* gen_allocator = generation_allocator (generation_of (gen));
        for (unsigned int b = 0; b < gen_allocator->number_of_buckets(); b++)
        {
            uint8_t* fo = gen_allocator->alloc_list_head_of (b);
            while (fo)
            {
                if (fo >= seg_start && fo < seg_end)
                {
                    total_free_item++;

                    size_t free_item_len = size(fo);

                    dprintf (3, ("[%Ix, %Ix[:%Id",
                                 (size_t)fo,
                                 (size_t)(fo + free_item_len),
                                 free_item_len));
                }

                fo = free_list_slot (fo);
            }
        }

        dprintf (3, ("total %Id free items", total_free_item));
    }
*/
}
#endif //TRACE_GC

void gc_heap::descr_generations (BOOL begin_gc_p)
{
    UNREFERENCED_PARAMETER(begin_gc_p);
#ifdef STRESS_LOG
    if (StressLog::StressLogOn(LF_GC, LL_INFO10))
    {
        gc_heap* hp = 0;
#ifdef MULTIPLE_HEAPS
        hp= this;
#endif //MULTIPLE_HEAPS

        STRESS_LOG1(LF_GC, LL_INFO10, "GC Heap %p\n", hp);
        for (int n = max_generation; n >= 0; --n)
        {
            STRESS_LOG4(LF_GC, LL_INFO10, "    Generation %d [%p, %p] cur = %p\n",
                    n,
                    generation_allocation_start(generation_of(n)),
                    generation_allocation_limit(generation_of(n)),
                    generation_allocation_pointer(generation_of(n)));

            heap_segment* seg = generation_start_segment(generation_of(n));
            while (seg)
            {
                STRESS_LOG4(LF_GC, LL_INFO10, "        Segment mem %p alloc = %p used %p committed %p\n",
                        heap_segment_mem(seg),
                        heap_segment_allocated(seg),
                        heap_segment_used(seg),
                        heap_segment_committed(seg));
                seg = heap_segment_next(seg);
            }
        }
    }
#endif  // STRESS_LOG

#ifdef TRACE_GC
    dprintf (2, ("lowest_address: %Ix highest_address: %Ix",
             (size_t) lowest_address, (size_t) highest_address));
#ifdef BACKGROUND_GC
    dprintf (2, ("bgc lowest_address: %Ix bgc highest_address: %Ix",
             (size_t) background_saved_lowest_address, (size_t) background_saved_highest_address));
#endif //BACKGROUND_GC

    if (heap_number == 0)
    {
        dprintf (1, ("total heap size: %Id, commit size: %Id", get_total_heap_size(), get_total_committed_size()));
    }

    int curr_gen_number = max_generation+1;
    while (curr_gen_number >= 0)
    {
        size_t total_gen_size = generation_size (curr_gen_number);
#ifdef SIMPLE_DPRINTF
        dprintf (GTC_LOG, ("[%s][g%d]gen %d:, size: %Id, frag: %Id(L: %Id, O: %Id), f: %d%% %s %s %s",
                      (begin_gc_p ? "BEG" : "END"),
                      settings.condemned_generation,
                      curr_gen_number,
                      total_gen_size,
                      dd_fragmentation (dynamic_data_of (curr_gen_number)),
                      generation_free_list_space (generation_of (curr_gen_number)),
                      generation_free_obj_space (generation_of (curr_gen_number)),
                      (total_gen_size ?
                        (int)(((double)dd_fragmentation (dynamic_data_of (curr_gen_number)) / (double)total_gen_size) * 100) :
                        0),
                      (begin_gc_p ? ("") : (settings.compaction ? "(compact)" : "(sweep)")),
                      (settings.heap_expansion ? "(EX)" : " "),
                      (settings.promotion ? "Promotion" : "NoPromotion")));
#else
        dprintf (2, ( "Generation %d: gap size: %d, generation size: %Id, fragmentation: %Id",
                      curr_gen_number,
                      size (generation_allocation_start (generation_of (curr_gen_number))),
                      total_gen_size,
                      dd_fragmentation (dynamic_data_of (curr_gen_number))));
#endif //SIMPLE_DPRINTF

        generation* gen = generation_of (curr_gen_number);
        heap_segment* seg = generation_start_segment (gen);
        while (seg && (seg != ephemeral_heap_segment))
        {
            dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
                        curr_gen_number,
                        (size_t)heap_segment_mem (seg),
                        (size_t)heap_segment_allocated (seg),
                        (size_t)heap_segment_committed (seg),
                        (size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)),
                        (size_t)(heap_segment_committed (seg) - heap_segment_allocated (seg))));
            print_free_list (curr_gen_number, seg);
            seg = heap_segment_next (seg);
        }
        if (seg && (seg != generation_start_segment (gen)))
        {
            dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
                         curr_gen_number,
                         (size_t)heap_segment_mem (seg),
                         (size_t)generation_allocation_start (generation_of (curr_gen_number-1))));
            print_free_list (curr_gen_number, seg);

        }
        else if (seg)
        {
            dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
                         curr_gen_number,
                         (size_t)generation_allocation_start (generation_of (curr_gen_number)),
                         (size_t)(((curr_gen_number == 0)) ?
                                  (heap_segment_allocated
                                   (generation_start_segment
                                    (generation_of (curr_gen_number)))) :
                                  (generation_allocation_start
                                   (generation_of (curr_gen_number - 1))))
                         ));
            print_free_list (curr_gen_number, seg);
        }
        curr_gen_number--;
    }

#endif //TRACE_GC
}

#undef TRACE_GC

//#define TRACE_GC

//-----------------------------------------------------------------------------
//
//                                  VM Specific support
//
//-----------------------------------------------------------------------------


#ifdef TRACE_GC

 unsigned int PromotedObjectCount  = 0;
 unsigned int CreatedObjectCount   = 0;
 unsigned int AllocDuration        = 0;
 unsigned int AllocCount           = 0;
 unsigned int AllocBigCount        = 0;
 unsigned int AllocSmallCount      = 0;
 unsigned int AllocStart           = 0;
#endif //TRACE_GC

//Static member variables.
VOLATILE(BOOL)    GCHeap::GcInProgress            = FALSE;
//GCTODO
//CMCSafeLock*      GCHeap::fGcLock;
GCEvent            *GCHeap::WaitForGCEvent         = NULL;
//GCTODO
#ifdef TRACE_GC
unsigned int       GCHeap::GcDuration;
#endif //TRACE_GC
unsigned            GCHeap::GcCondemnedGeneration   = 0;
size_t              GCHeap::totalSurvivedSize       = 0;
#ifdef FEATURE_PREMORTEM_FINALIZATION
CFinalize*          GCHeap::m_Finalize              = 0;
BOOL                GCHeap::GcCollectClasses        = FALSE;
VOLATILE(int32_t)      GCHeap::m_GCFLock               = 0;

#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
#ifdef BACKGROUND_GC
int                 GCHeap::gc_stress_fgcs_in_bgc   = 0;
#endif // BACKGROUND_GC
#ifndef MULTIPLE_HEAPS
OBJECTHANDLE        GCHeap::m_StressObjs[NUM_HEAP_STRESS_OBJS];
int                 GCHeap::m_CurStressObj          = 0;
#endif // !MULTIPLE_HEAPS
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK

#endif //FEATURE_PREMORTEM_FINALIZATION

class NoGCRegionLockHolder
{
public:
    NoGCRegionLockHolder()
    {
        enter_spin_lock_noinstru(&g_no_gc_lock);
    }

    ~NoGCRegionLockHolder()
    {
        leave_spin_lock_noinstru(&g_no_gc_lock);
    }
};

// An explanation of locking for finalization:
//
// Multiple threads allocate objects.  During the allocation, they are serialized by
// the AllocLock above.  But they release that lock before they register the object
// for finalization.  That's because there is much contention for the alloc lock, but
// finalization is presumed to be a rare case.
//
// So registering an object for finalization must be protected by the FinalizeLock.
//
// There is another logical queue that involves finalization.  When objects registered
// for finalization become unreachable, they are moved from the "registered" queue to
// the "unreachable" queue.  Note that this only happens inside a GC, so no other
// threads can be manipulating either queue at that time.  Once the GC is over and
// threads are resumed, the Finalizer thread will dequeue objects from the "unreachable"
// queue and call their finalizers.  This dequeue operation is also protected with
// the finalize lock.
//
// At first, this seems unnecessary.  Only one thread is ever enqueuing or dequeuing
// on the unreachable queue (either the GC thread during a GC or the finalizer thread
// when a GC is not in progress).  The reason we share a lock with threads enqueuing
// on the "registered" queue is that the "registered" and "unreachable" queues are
// interrelated.
//
// They are actually two regions of a longer list, which can only grow at one end.
// So to enqueue an object to the "registered" list, you actually rotate an unreachable
// object at the boundary between the logical queues, out to the other end of the
// unreachable queue -- where all growing takes place.  Then you move the boundary
// pointer so that the gap we created at the boundary is now on the "registered"
// side rather than the "unreachable" side.  Now the object can be placed into the
// "registered" side at that point.  This is much more efficient than doing moves
// of arbitrarily long regions, but it causes the two queues to require a shared lock.
//
// Notice that Enter/LeaveFinalizeLock is not a GC-aware spin lock.  Instead, it relies
// on the fact that the lock will only be taken for a brief period and that it will
// never provoke or allow a GC while the lock is held.  This is critical.  If the
// FinalizeLock used enter_spin_lock (and thus sometimes enters preemptive mode to
// allow a GC), then the Alloc client would have to GC protect a finalizable object
// to protect against that eventuality.  That is too slow!
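
// Below is a minimal illustrative sketch of the "rotate at the boundary" idea
// described above. The type and member names are hypothetical and exist only
// for this example; the real CFinalize bookkeeping is more involved
// (per-generation segments, overflow handling, etc.).
struct toy_finalize_queue
{
    // One contiguous array: [0, boundary) holds "registered" entries and
    // [boundary, count) holds "unreachable" entries; all growth happens at count.
    uint8_t** slots;
    size_t    boundary;   // index of the first "unreachable" slot
    size_t    count;      // one past the last used slot

    // Caller holds the finalize lock; capacity checks are omitted in this sketch.
    void register_for_finalization (uint8_t* o)
    {
        // Rotate the unreachable entry sitting at the boundary out to the
        // growing end of the unreachable region...
        slots[count++] = slots[boundary];
        // ...which opens a gap at the boundary; claim it for the new object and
        // advance the boundary so the gap now belongs to the "registered" side.
        slots[boundary++] = o;
    }
};
// Moving a single entry per enqueue is what lets both logical queues live in one
// array (and share one lock) without ever shifting long runs of entries.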



BOOL IsValidObject99(uint8_t *pObject)
{
#ifdef VERIFY_HEAP
    if (!((CObjectHeader*)pObject)->IsFree())
        ((CObjectHeader *) pObject)->Validate();
#endif //VERIFY_HEAP
    return(TRUE);
}

#ifdef BACKGROUND_GC
BOOL gc_heap::bgc_mark_array_range (heap_segment* seg,
                                    BOOL whole_seg_p,
                                    uint8_t** range_beg,
                                    uint8_t** range_end)
{
    uint8_t* seg_start = heap_segment_mem (seg);
    uint8_t* seg_end = (whole_seg_p ? heap_segment_reserved (seg) : align_on_mark_word (heap_segment_allocated (seg)));

    if ((seg_start < background_saved_highest_address) &&
        (seg_end > background_saved_lowest_address))
    {
        *range_beg = max (seg_start, background_saved_lowest_address);
        *range_end = min (seg_end, background_saved_highest_address);
        return TRUE;
    }
    else
    {
        return FALSE;
    }
}

void gc_heap::bgc_verify_mark_array_cleared (heap_segment* seg)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
    if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        uint8_t* range_beg = 0;
        uint8_t* range_end = 0;

        if (bgc_mark_array_range (seg, TRUE, &range_beg, &range_end))
        {
            size_t  markw = mark_word_of (range_beg);
            size_t  markw_end = mark_word_of (range_end);
            while (markw < markw_end)
            {
                if (mark_array [markw])
                {
                    dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                                    markw, mark_array [markw], mark_word_address (markw)));
                    FATAL_GC_ERROR();
                }
                markw++;
            }
            uint8_t* p = mark_word_address (markw_end);
            while (p < range_end)
            {
                assert (!(mark_array_marked (p)));
                p++;
            }
        }
    }
#endif //VERIFY_HEAP && MARK_ARRAY
}

void gc_heap::verify_mark_bits_cleared (uint8_t* obj, size_t s)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
    size_t start_mark_bit = mark_bit_of (obj) + 1;
    size_t end_mark_bit = mark_bit_of (obj + s);
    unsigned int startbit = mark_bit_bit (start_mark_bit);
    unsigned int endbit = mark_bit_bit (end_mark_bit);
    size_t startwrd = mark_bit_word (start_mark_bit);
    size_t endwrd = mark_bit_word (end_mark_bit);
    unsigned int result = 0;

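    // firstwrd masks off the bits below startbit so only the part of the first
    // mark word that belongs to this object is checked; lastwrd masks off the
    // bits at and above endbit so only the part of the last mark word that
    // belongs to this object is checked.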
    unsigned int firstwrd = ~(lowbits (~0, startbit));
    unsigned int lastwrd = ~(highbits (~0, endbit));

    if (startwrd == endwrd)
    {
        unsigned int wrd = firstwrd & lastwrd;
        result = mark_array[startwrd] & wrd;
        if (result)
        {
            FATAL_GC_ERROR();
        }
        return;
    }

    // verify the first mark word is cleared.
    if (startbit)
    {
        result = mark_array[startwrd] & firstwrd;
        if (result)
        {
            FATAL_GC_ERROR();
        }
        startwrd++;
    }

    for (size_t wrdtmp = startwrd; wrdtmp < endwrd; wrdtmp++)
    {
        result = mark_array[wrdtmp];
        if (result)
        {
            FATAL_GC_ERROR();
        }
    }

    // verify the last mark word is cleared.
    if (endbit)
    {
        result = mark_array[endwrd] & lastwrd;
        if (result)
        {
            FATAL_GC_ERROR();
        }
    }
#endif //VERIFY_HEAP && MARK_ARRAY
}

void gc_heap::clear_all_mark_array()
{
#ifdef MARK_ARRAY
    //size_t num_dwords_written = 0;
    //size_t begin_time = GetHighPrecisionTimeStamp();

    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

    while (1)
    {
        if (seg == 0)
        {
            if (gen != large_object_generation)
            {
                gen = generation_of (max_generation+1);
                seg = heap_segment_rw (generation_start_segment (gen));
            }
            else
            {
                break;
            }
        }

        uint8_t* range_beg = 0;
        uint8_t* range_end = 0;

        if (bgc_mark_array_range (seg, (seg == ephemeral_heap_segment), &range_beg, &range_end))
        {
            size_t markw = mark_word_of (range_beg);
            size_t markw_end = mark_word_of (range_end);
            size_t size_total = (markw_end - markw) * sizeof (uint32_t);
            //num_dwords_written = markw_end - markw;
            size_t size = 0;
            size_t size_left = 0;

            assert (((size_t)&mark_array[markw] & (sizeof(PTR_PTR)-1)) == 0);

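            // memclr clears the pointer-size-aligned prefix of the range; any
            // uint32_t-sized remainder that doesn't fill a whole pointer is
            // cleared by hand in the loop further down.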
            if ((size_total & (sizeof(PTR_PTR) - 1)) != 0)
            {
                size = (size_total & ~(sizeof(PTR_PTR) - 1));
                size_left = size_total - size;
                assert ((size_left & (sizeof (uint32_t) - 1)) == 0);
            }
            else
            {
                size = size_total;
            }

            memclr ((uint8_t*)&mark_array[markw], size);

            if (size_left != 0)
            {
                uint32_t* markw_to_clear = &mark_array[markw + size / sizeof (uint32_t)];
                for (size_t i = 0; i < (size_left / sizeof (uint32_t)); i++)
                {
                    *markw_to_clear = 0;
                    markw_to_clear++;
                }
            }
        }

        seg = heap_segment_next_rw (seg);
    }

    //size_t end_time = GetHighPrecisionTimeStamp() - begin_time;

    //printf ("took %Id ms to clear %Id bytes\n", end_time, num_dwords_written*sizeof(uint32_t));

#endif //MARK_ARRAY
}

#endif //BACKGROUND_GC

void gc_heap::verify_mark_array_cleared (heap_segment* seg)
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
    assert (card_table == g_gc_card_table);
    size_t  markw = mark_word_of (heap_segment_mem (seg));
    size_t  markw_end = mark_word_of (heap_segment_reserved (seg));

    while (markw < markw_end)
    {
        if (mark_array [markw])
        {
            dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                            markw, mark_array [markw], mark_word_address (markw)));
            FATAL_GC_ERROR();
        }
        markw++;
    }
#endif //VERIFY_HEAP && MARK_ARRAY
}

void gc_heap::verify_mark_array_cleared ()
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
    if (recursive_gc_sync::background_running_p() && GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        generation* gen = generation_of (max_generation);
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

        while (1)
        {
            if (seg == 0)
            {
                if (gen != large_object_generation)
                {
                    gen = generation_of (max_generation+1);
                    seg = heap_segment_rw (generation_start_segment (gen));
                }
                else
                {
                    break;
                }
            }

            bgc_verify_mark_array_cleared (seg);
            seg = heap_segment_next_rw (seg);
        }
    }
#endif //VERIFY_HEAP && MARK_ARRAY
}

void gc_heap::verify_seg_end_mark_array_cleared()
{
#if defined (VERIFY_HEAP) && defined (MARK_ARRAY)
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        generation* gen = generation_of (max_generation);
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

        while (1)
        {
            if (seg == 0)
            {
                if (gen != large_object_generation)
                {
                    gen = generation_of (max_generation+1);
                    seg = heap_segment_rw (generation_start_segment (gen));
                }
                else
                {
                    break;
                }
            }

            // We already cleared all mark array bits for ephemeral generations
            // at the beginning of bgc sweep
            uint8_t* from = ((seg == ephemeral_heap_segment) ?
                          generation_allocation_start (generation_of (max_generation - 1)) :
                          heap_segment_allocated (seg));
            size_t  markw = mark_word_of (align_on_mark_word (from));
            size_t  markw_end = mark_word_of (heap_segment_reserved (seg));

            while (from < mark_word_address (markw))
            {
                if (is_mark_bit_set (from))
                {
                    dprintf (3, ("mark bit for %Ix was not cleared", from));
                    FATAL_GC_ERROR();
                }

                from += mark_bit_pitch;
            }

            while (markw < markw_end)
            {
                if (mark_array [markw])
                {
                    dprintf  (3, ("The mark bits at 0x%Ix:0x%Ix(addr: 0x%Ix) were not cleared",
                                    markw, mark_array [markw], mark_word_address (markw)));
                    FATAL_GC_ERROR();
                }
                markw++;
            }
            seg = heap_segment_next_rw (seg);
        }
    }
#endif //VERIFY_HEAP && MARK_ARRAY
}

// This function is called to make sure we don't mess up the segment list
// in SOH. It's called by:
// 1) begin and end of ephemeral GCs
// 2) during bgc sweep when we switch segments.
void gc_heap::verify_soh_segment_list()
{
#ifdef VERIFY_HEAP
    if (GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC)
    {
        generation* gen = generation_of (max_generation);
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
        heap_segment* last_seg = 0;
        while (seg)
        {
            last_seg = seg;
            seg = heap_segment_next_rw (seg);
        }
        if (last_seg != ephemeral_heap_segment)
        {
            FATAL_GC_ERROR();
        }
    }
#endif //VERIFY_HEAP
}

// This function can be called at any foreground GCs or blocking GCs. For background GCs,
// it can be called at the end of the final marking; and at any point during background
// sweep.
// NOTE - to be able to call this function during background sweep, we need to temporarily
// NOT clear the mark array bits as we go.
void gc_heap::verify_partial ()
{
#ifdef BACKGROUND_GC
    //printf ("GC#%d: Verifying loh during sweep\n", settings.gc_index);
    //generation* gen = large_object_generation;
    generation* gen = generation_of (max_generation);
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));
    int align_const = get_alignment_constant (gen != large_object_generation);

    uint8_t* o = 0;
    uint8_t* end = 0;
    size_t s = 0;

    // Different ways to fail.
    BOOL mark_missed_p = FALSE;
    BOOL bad_ref_p = FALSE;
    BOOL free_ref_p = FALSE;

    while (1)
    {
        if (seg == 0)
        {
            if (gen != large_object_generation)
            {
                //switch to LOH
                gen = large_object_generation;
                align_const = get_alignment_constant (gen != large_object_generation);
                seg = heap_segment_rw (generation_start_segment (gen));
                continue;
            }
            else
            {
                break;
            }
        }

        o = heap_segment_mem (seg);
        end  = heap_segment_allocated (seg);
        //printf ("validating [%Ix-[%Ix\n", o, end);
        while (o < end)
        {
            s = size (o);

            BOOL marked_p = background_object_marked (o, FALSE);

            if (marked_p)
            {
                go_through_object_cl (method_table (o), o, s, oo,
                    {
                        if (*oo)
                        {
                            //dprintf (3, ("VOM: verifying member %Ix in obj %Ix", (size_t)*oo, o));
                            MethodTable *pMT = method_table (*oo);

                            if (pMT == g_gc_pFreeObjectMethodTable)
                            {
                                free_ref_p = TRUE;
                                FATAL_GC_ERROR();
                            }

                            if (!pMT->SanityCheck())
                            {
                                bad_ref_p = TRUE;
                                dprintf (3, ("Bad member of %Ix %Ix",
                                            (size_t)oo, (size_t)*oo));
                                FATAL_GC_ERROR();
                            }

                            if (current_bgc_state == bgc_final_marking)
                            {
                                if (marked_p && !background_object_marked (*oo, FALSE))
                                {
                                    mark_missed_p = TRUE;
                                    FATAL_GC_ERROR();
                                }
                            }
                        }
                    }
                                    );
            }

            o = o + Align(s, align_const);
        }
        seg = heap_segment_next_rw (seg);
    }

    //printf ("didn't find any large object large enough...\n");
    //printf ("finished verifying loh\n");
#endif //BACKGROUND_GC
}

#ifdef VERIFY_HEAP

void
gc_heap::verify_free_lists ()
{
    for (int gen_num = 0; gen_num <= max_generation+1; gen_num++)
    {
        dprintf (3, ("Verifying free list for gen:%d", gen_num));
        allocator* gen_alloc = generation_allocator (generation_of (gen_num));
        size_t sz = gen_alloc->first_bucket_size();
        bool verify_undo_slot = (gen_num != 0) && (gen_num != max_generation+1) && !gen_alloc->discard_if_no_fit_p();

        for (unsigned int a_l_number = 0; a_l_number < gen_alloc->number_of_buckets(); a_l_number++)
        {
            uint8_t* free_list = gen_alloc->alloc_list_head_of (a_l_number);
            uint8_t* prev = 0;
            while (free_list)
            {
                if (!((CObjectHeader*)free_list)->IsFree())
                {
                    dprintf (3, ("Verifying Heap: curr free list item %Ix isn't a free object",
                                 (size_t)free_list));
                    FATAL_GC_ERROR();
                }
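                // Bucket b is expected to hold items in [sz/2, sz): flag anything
                // too large for this bucket (unless it's the last bucket, which has
                // no upper bound) or too small for it (unless it's the first bucket,
                // which has no lower bound).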
                if (((a_l_number < (gen_alloc->number_of_buckets()-1))&& (unused_array_size (free_list) >= sz))
                    || ((a_l_number != 0) && (unused_array_size (free_list) < sz/2)))
                {
                    dprintf (3, ("Verifying Heap: curr free list item %Ix isn't in the right bucket",
                                 (size_t)free_list));
                    FATAL_GC_ERROR();
                }
                if (verify_undo_slot && (free_list_undo (free_list) != UNDO_EMPTY))
                {
                    dprintf (3, ("Verifying Heap: curr free list item %Ix has a non-empty undo slot",
                                 (size_t)free_list));
                    FATAL_GC_ERROR();
                }
                if ((gen_num != max_generation+1)&&(object_gennum (free_list)!= gen_num))
                {
                    dprintf (3, ("Verifying Heap: curr free list item %Ix is in the wrong generation free list",
                                 (size_t)free_list));
                    FATAL_GC_ERROR();
                }

                prev = free_list;
                free_list = free_list_slot (free_list);
            }
            //verify the sanity of the tail
            uint8_t* tail = gen_alloc->alloc_list_tail_of (a_l_number);
            if (!((tail == 0) || (tail == prev)))
            {
                dprintf (3, ("Verifying Heap: tail of free list is not correct"));
                FATAL_GC_ERROR();
            }
            if (tail == 0)
            {
                uint8_t* head = gen_alloc->alloc_list_head_of (a_l_number);
                if ((head != 0) && (free_list_slot (head) != 0))
                {
                    dprintf (3, ("Verifying Heap: tail of free list is not correct"));
                    FATAL_GC_ERROR();
                }
            }

            sz *=2;
        }
    }
}

void
gc_heap::verify_heap (BOOL begin_gc_p)
{
    int             heap_verify_level = static_cast<int>(GCConfig::GetHeapVerifyLevel());
    size_t          last_valid_brick = 0;
    BOOL            bCurrentBrickInvalid = FALSE;
    BOOL            large_brick_p = TRUE;
    size_t          curr_brick = 0;
    size_t          prev_brick = (size_t)-1;
    int             curr_gen_num = max_generation+1;
    heap_segment*   seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num ) ));

    PREFIX_ASSUME(seg != NULL);

    uint8_t*        curr_object = heap_segment_mem (seg);
    uint8_t*        prev_object = 0;
    uint8_t*        begin_youngest = generation_allocation_start(generation_of(0));
    uint8_t*        end_youngest = heap_segment_allocated (ephemeral_heap_segment);
    uint8_t*        next_boundary = generation_allocation_start (generation_of (max_generation - 1));
    int             align_const = get_alignment_constant (FALSE);
    size_t          total_objects_verified = 0;
    size_t          total_objects_verified_deep = 0;

#ifdef BACKGROUND_GC
    BOOL consider_bgc_mark_p    = FALSE;
    BOOL check_current_sweep_p  = FALSE;
    BOOL check_saved_sweep_p    = FALSE;
    should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC

#ifdef MULTIPLE_HEAPS
    t_join* current_join = &gc_t_join;
#ifdef BACKGROUND_GC
    if (settings.concurrent && (bgc_thread_id.IsCurrentThread()))
    {
        // We always call verify_heap on entry of GC on the SVR GC threads.
        current_join = &bgc_t_join;
    }
#endif //BACKGROUND_GC
#endif //MULTIPLE_HEAPS

    UNREFERENCED_PARAMETER(begin_gc_p);
#ifdef BACKGROUND_GC
    dprintf (2,("[%s]GC#%d(%s): Verifying heap - begin",
        (begin_gc_p ? "BEG" : "END"),
        VolatileLoad(&settings.gc_index),
        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
#else
    dprintf (2,("[%s]GC#%d: Verifying heap - begin",
                (begin_gc_p ? "BEG" : "END"), VolatileLoad(&settings.gc_index)));
#endif //BACKGROUND_GC

#ifndef MULTIPLE_HEAPS
    if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) ||
        (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment)))
    {
        FATAL_GC_ERROR();
    }
#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
    //don't touch the memory because the program is allocating from it.
    if (!settings.concurrent)
#endif //BACKGROUND_GC
    {
        if (!(heap_verify_level & GCConfig::HEAPVERIFY_NO_MEM_FILL))
        {
            //uninit the unused portions of segments.
            generation* gen1 = large_object_generation;
            heap_segment* seg1 = heap_segment_rw (generation_start_segment (gen1));
            PREFIX_ASSUME(seg1 != NULL);

            while (1)
            {
                if (seg1)
                {
                    uint8_t* clear_start = heap_segment_allocated (seg1) - plug_skew;
                    if (heap_segment_used (seg1) > clear_start)
                    {
                        dprintf (3, ("setting end of seg %Ix: [%Ix-[%Ix to 0xaa",
                                    heap_segment_mem (seg1),
                                    clear_start ,
                                    heap_segment_used (seg1)));
                        memset (clear_start, 0xaa,
                            (heap_segment_used (seg1) - clear_start));
                    }
                    seg1 = heap_segment_next_rw (seg1);
                }
                else
                {
                    if (gen1 == large_object_generation)
                    {
                        gen1 = generation_of (max_generation);
                        seg1 = heap_segment_rw (generation_start_segment (gen1));
                        PREFIX_ASSUME(seg1 != NULL);
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }
    }

#ifdef MULTIPLE_HEAPS
    current_join->join(this, gc_join_verify_copy_table);
    if (current_join->joined())
    {
        // in concurrent GC, a new segment could be allocated while the GC is working, so the card/brick table might not be updated at this point
        for (int i = 0; i < n_heaps; i++)
        {
            //copy the card and brick tables
            if (g_gc_card_table != g_heaps[i]->card_table)
            {
                g_heaps[i]->copy_brick_card_table();
            }
        }

        current_join->restart();
    }
#else
    if (g_gc_card_table != card_table)
        copy_brick_card_table();
#endif //MULTIPLE_HEAPS

    // verify that the generation structures make sense
    {
        generation* gen = generation_of (max_generation);

        assert (generation_allocation_start (gen) ==
                heap_segment_mem (heap_segment_rw (generation_start_segment (gen))));
        int gen_num = max_generation-1;
        generation* prev_gen = gen;
        while (gen_num >= 0)
        {
            gen = generation_of (gen_num);
            assert (generation_allocation_segment (gen) == ephemeral_heap_segment);
            assert (generation_allocation_start (gen) >= heap_segment_mem (ephemeral_heap_segment));
            assert (generation_allocation_start (gen) < heap_segment_allocated (ephemeral_heap_segment));

            if (generation_start_segment (prev_gen ) ==
                generation_start_segment (gen))
            {
                assert (generation_allocation_start (prev_gen) <
                        generation_allocation_start (gen));
            }
            prev_gen = gen;
            gen_num--;
        }
    }

    while (1)
    {
        // Handle segment transitions
        if (curr_object >= heap_segment_allocated (seg))
        {
            if (curr_object > heap_segment_allocated(seg))
            {
                dprintf (3, ("Verifying Heap: curr_object: %Ix > heap_segment_allocated (seg: %Ix)",
                        (size_t)curr_object, (size_t)seg));
                FATAL_GC_ERROR();
            }
            seg = heap_segment_next_in_range (seg);
            if (seg)
            {
#ifdef BACKGROUND_GC
                should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC
                curr_object = heap_segment_mem(seg);
                prev_object = 0;
                continue;
            }
            else
            {
                if (curr_gen_num == (max_generation+1))
                {
                    curr_gen_num--;
                    seg = heap_segment_in_range (generation_start_segment (generation_of (curr_gen_num)));

                    PREFIX_ASSUME(seg != NULL);

#ifdef BACKGROUND_GC
                    should_check_bgc_mark (seg, &consider_bgc_mark_p, &check_current_sweep_p, &check_saved_sweep_p);
#endif //BACKGROUND_GC
                    curr_object = heap_segment_mem (seg);
                    prev_object = 0;
                    large_brick_p = FALSE;
                    align_const = get_alignment_constant (TRUE);
                }
                else
                    break;  // Done Verifying Heap -- no more segments
            }
        }

        // Are we at the end of the youngest_generation?
        if (seg == ephemeral_heap_segment)
        {
            if (curr_object >= end_youngest)
            {
                // prev_object length is too long if we hit this int3
                if (curr_object > end_youngest)
                {
                    dprintf (3, ("Verifying Heap: curr_object: %Ix > end_youngest: %Ix",
                            (size_t)curr_object, (size_t)end_youngest));
                    FATAL_GC_ERROR();
                }
                break;
            }

            if ((curr_object >= next_boundary) && (curr_gen_num > 0))
            {
                curr_gen_num--;
                if (curr_gen_num > 0)
                {
                    next_boundary = generation_allocation_start (generation_of (curr_gen_num - 1));
                }
            }
        }

         //if (is_mark_set (curr_object))
         //{
         //        printf ("curr_object: %Ix is marked!",(size_t)curr_object);
         //        FATAL_GC_ERROR();
         //}

        size_t s = size (curr_object);
        dprintf (3, ("o: %Ix, s: %d", (size_t)curr_object, s));
        if (s == 0)
        {
            dprintf (3, ("Verifying Heap: size of current object %Ix == 0", curr_object));
            FATAL_GC_ERROR();
        }

        // If the object is not in the youngest generation, then let's
        // verify that the brick table is correct.
        if (((seg != ephemeral_heap_segment) ||
             (brick_of(curr_object) < brick_of(begin_youngest))))
        {
            curr_brick = brick_of(curr_object);

            // Brick Table Verification...
            //
            // On brick transition
            //     if brick is negative
            //          verify that brick indirects to previous valid brick
            //     else
            //          set current brick invalid flag to be flipped if we
            //          encounter an object at the correct place
            //
            if (curr_brick != prev_brick)
            {
                // If the last brick we were examining had positive
                // entry but we never found the matching object, then
                // we have a problem
                // If prev_brick was the last one of the segment
                // it's ok for it to be invalid because it is never looked at
                if (bCurrentBrickInvalid &&
                    (curr_brick != brick_of (heap_segment_mem (seg))) &&
                    !heap_segment_read_only_p (seg))
                {
                    dprintf (3, ("curr brick %Ix invalid", curr_brick));
                    FATAL_GC_ERROR();
                }

                if (large_brick_p)
                {
                    // Large objects verify the table only if they are in range.
                    if ((heap_segment_reserved (seg) <= highest_address) &&
                        (heap_segment_mem (seg) >= lowest_address) &&
                        brick_table [curr_brick] != 0)
                    {
                        dprintf (3, ("curr_brick %Ix for large object %Ix not set to -32768",
                                curr_brick, (size_t)curr_object));
                        FATAL_GC_ERROR();
                    }
                    else
                    {
                        bCurrentBrickInvalid = FALSE;
                    }
                }
                else
                {
                    // If the current brick contains a negative value make sure
                    // that the indirection terminates at the last valid brick
                    if (brick_table [curr_brick] <= 0)
                    {
                        if (brick_table [curr_brick] == 0)
                        {
                            dprintf(3, ("curr_brick %Ix for object %Ix set to 0",
                                    curr_brick, (size_t)curr_object));
                            FATAL_GC_ERROR();
                        }
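                        // A negative brick entry is a relative offset back to the brick
                        // in which the object spanning this brick starts (e.g. -3 means
                        // "three bricks earlier"). Follow the chain and verify that it
                        // terminates no earlier than one brick before the segment start.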
                        ptrdiff_t i = curr_brick;
                        while ((i >= ((ptrdiff_t) brick_of (heap_segment_mem (seg)))) &&
                               (brick_table[i] < 0))
                        {
                            i = i + brick_table[i];
                        }
                        if (i <  ((ptrdiff_t)(brick_of (heap_segment_mem (seg))) - 1))
                        {
                            dprintf (3, ("ptrdiff i: %Ix < brick_of (heap_segment_mem (seg)):%Ix - 1. curr_brick: %Ix",
                                    i, brick_of (heap_segment_mem (seg)),
                                    curr_brick));
                            FATAL_GC_ERROR();
                        }
                        // if (i != last_valid_brick)
                        //  FATAL_GC_ERROR();
                        bCurrentBrickInvalid = FALSE;
                    }
                    else if (!heap_segment_read_only_p (seg))
                    {
                        bCurrentBrickInvalid = TRUE;
                    }
                }
            }
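            // A positive brick entry records (offset of the first object in the brick) + 1;
            // the brick stays flagged invalid until we walk onto exactly that object,
            // which proves the entry correct.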

            if (bCurrentBrickInvalid)
            {
                if (curr_object == (brick_address(curr_brick) + brick_table[curr_brick] - 1))
                {
                    bCurrentBrickInvalid = FALSE;
                    last_valid_brick = curr_brick;
                }
            }
        }

        if (*((uint8_t**)curr_object) != (uint8_t *) g_gc_pFreeObjectMethodTable)
        {
#ifdef FEATURE_LOH_COMPACTION
            if ((curr_gen_num == (max_generation+1)) && (prev_object != 0))
            {
                assert (method_table (prev_object) == g_gc_pFreeObjectMethodTable);
            }
#endif //FEATURE_LOH_COMPACTION

            total_objects_verified++;

            BOOL can_verify_deep = TRUE;
#ifdef BACKGROUND_GC
            can_verify_deep = fgc_should_consider_object (curr_object, seg, consider_bgc_mark_p, check_current_sweep_p, check_saved_sweep_p);
#endif //BACKGROUND_GC

            BOOL deep_verify_obj = can_verify_deep;
            if ((heap_verify_level & GCConfig::HEAPVERIFY_DEEP_ON_COMPACT) && !settings.compaction)
                deep_verify_obj = FALSE;

            ((CObjectHeader*)curr_object)->ValidateHeap((Object*)curr_object, deep_verify_obj);

            if (can_verify_deep)
            {
                if (curr_gen_num > 0)
                {
                    BOOL need_card_p = FALSE;
                    if (contain_pointers_or_collectible (curr_object))
                    {
                        dprintf (4, ("curr_object: %Ix", (size_t)curr_object));
                        size_t crd = card_of (curr_object);
                        BOOL found_card_p = card_set_p (crd);

#ifdef COLLECTIBLE_CLASS
                        if (is_collectible(curr_object))
                        {
                            uint8_t* class_obj = get_class_object (curr_object);
                            if ((class_obj < ephemeral_high) && (class_obj >= next_boundary))
                            {
                                if (!found_card_p)
                                {
                                    dprintf (3, ("Card not set, curr_object = [%Ix:%Ix pointing to class object %Ix",
                                                card_of (curr_object), (size_t)curr_object, class_obj));

                                    FATAL_GC_ERROR();
                                }
                            }
                        }
#endif //COLLECTIBLE_CLASS
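                        // Cross-generation reference check: any field of this object that
                        // points into [next_boundary, ephemeral_high) must be covered by a
                        // set card; otherwise the write barrier missed a reference into a
                        // younger generation.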

                        if (contain_pointers(curr_object))
                        {
                            go_through_object_nostart
                                (method_table(curr_object), curr_object, s, oo,
                                {
                                    if ((crd != card_of ((uint8_t*)oo)) && !found_card_p)
                                    {
                                        crd = card_of ((uint8_t*)oo);
                                        found_card_p = card_set_p (crd);
                                        need_card_p = FALSE;
                                    }
                                    if ((*oo < ephemeral_high) && (*oo >= next_boundary))
                                    {
                                        need_card_p = TRUE;
                                    }

                                    if (need_card_p && !found_card_p)
                                    {
                                        dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
                                                    card_of (curr_object), (size_t)curr_object,
                                                    card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
                                        FATAL_GC_ERROR();
                                    }
                                }
                                );
                        }
                        if (need_card_p && !found_card_p)
                        {
                            dprintf (3, ("Card not set, curr_object = [%Ix:%Ix, %Ix:%Ix[",
                                    card_of (curr_object), (size_t)curr_object,
                                    card_of (curr_object+Align(s, align_const)), (size_t)curr_object+Align(s, align_const)));
                            FATAL_GC_ERROR();
                        }
                    }
                }
                total_objects_verified_deep++;
            }
        }

        prev_object = curr_object;
        prev_brick = curr_brick;
        curr_object = curr_object + Align(s, align_const);
        if (curr_object < prev_object)
        {
            dprintf (3, ("overflow because of a bad object size: %Ix size %Ix", prev_object, s));
            FATAL_GC_ERROR();
        }
    }

#ifdef BACKGROUND_GC
    dprintf (2, ("(%s)(%s)(%s) total_objects_verified is %Id, total_objects_verified_deep is %Id",
                 (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p () ? "FGC" : "NGC")),
                 (begin_gc_p ? "BEG" : "END"),
                 ((current_c_gc_state == c_gc_state_planning) ? "in plan" : "not in plan"),
                 total_objects_verified, total_objects_verified_deep));
    if (current_c_gc_state != c_gc_state_planning)
    {
        assert (total_objects_verified == total_objects_verified_deep);
    }
#endif //BACKGROUND_GC

    verify_free_lists();

#ifdef FEATURE_PREMORTEM_FINALIZATION
    finalize_queue->CheckFinalizerObjects();
#endif // FEATURE_PREMORTEM_FINALIZATION

    {
        // to be consistent with handle table APIs pass a ScanContext*
        // to provide the heap number.  the SC isn't complete though so
        // limit its scope to handle table verification.
        ScanContext sc;
        sc.thread_number = heap_number;
        GCScan::VerifyHandleTable(max_generation, max_generation, &sc);
    }

#ifdef MULTIPLE_HEAPS
    current_join->join(this, gc_join_verify_objects_done);
    if (current_join->joined())
#endif //MULTIPLE_HEAPS
    {
        GCToEEInterface::VerifySyncTableEntry();
#ifdef MULTIPLE_HEAPS
        current_join->restart();
#endif //MULTIPLE_HEAPS
    }

#ifdef BACKGROUND_GC
    if (!settings.concurrent)
    {
        if (current_c_gc_state == c_gc_state_planning)
        {
            // temporarily commenting this out 'cause an FGC
            // could be triggered before we sweep ephemeral.
            //verify_seg_end_mark_array_cleared();
        }
    }

    if (settings.concurrent)
    {
        verify_mark_array_cleared();
    }
    dprintf (2,("GC%d(%s): Verifying heap - end",
        VolatileLoad(&settings.gc_index),
        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC"))));
#else
    dprintf (2,("GC#d: Verifying heap - end", VolatileLoad(&settings.gc_index)));
#endif //BACKGROUND_GC
}

#endif  //VERIFY_HEAP


void GCHeap::ValidateObjectMember (Object* obj)
{
#ifdef VERIFY_HEAP
    size_t s = size (obj);
    uint8_t* o = (uint8_t*)obj;

    go_through_object_cl (method_table (obj), o, s, oo,
                                {
                                    uint8_t* child_o = *oo;
                                    if (child_o)
                                    {
                                        dprintf (3, ("VOM: m: %Ix obj %Ix", (size_t)child_o, o));
                                        MethodTable *pMT = method_table (child_o);
                                        assert(pMT);
                                        if (!pMT->SanityCheck()) {
                                            dprintf (3, ("Bad member of %Ix %Ix",
                                                        (size_t)oo, (size_t)child_o));
                                            FATAL_GC_ERROR();
                                        }
                                    }
                                } );
#endif // VERIFY_HEAP
}

void DestructObject (CObjectHeader* hdr)
{
    UNREFERENCED_PARAMETER(hdr); // compiler bug? -- this *is*, indeed, referenced
    hdr->~CObjectHeader();
}

HRESULT GCHeap::Shutdown ()
{
    deleteGCShadow();

    GCScan::GcRuntimeStructuresValid (FALSE);

    // Cannot assert this, since we use SuspendEE as the mechanism to quiesce all
    // threads except the one performing the shutdown.
    // ASSERT( !GcInProgress );

    // Guard against any more GC occurring and against any threads blocking
    // for GC to complete when the GC heap is gone.  This fixes a race condition
    // where a thread in GC is destroyed as part of process destruction and
    // the remaining threads block for GC complete.

    //GCTODO
    //EnterAllocLock();
    //Enter();
    //EnterFinalizeLock();
    //SetGCDone();

    // During shutdown a lot of threads are suspended
    // on this event; we don't want to wake them up just yet.
    //CloseHandle (WaitForGCEvent);

    //find out if the global card table hasn't been used yet
    uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))];
    if (card_table_refcount (ct) == 0)
    {
        destroy_card_table (ct);
        g_gc_card_table = nullptr;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        g_gc_card_bundle_table = nullptr;
#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        SoftwareWriteWatch::StaticClose();
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    }

    //destroy all segments on the standby list
    while(gc_heap::segment_standby_list != 0)
    {
        heap_segment* next_seg = heap_segment_next (gc_heap::segment_standby_list);
#ifdef MULTIPLE_HEAPS
        (gc_heap::g_heaps[0])->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
#else //MULTIPLE_HEAPS
        pGenGCHeap->delete_heap_segment (gc_heap::segment_standby_list, FALSE);
#endif //MULTIPLE_HEAPS
        gc_heap::segment_standby_list = next_seg;
    }


#ifdef MULTIPLE_HEAPS

    for (int i = 0; i < gc_heap::n_heaps; i ++)
    {
        delete gc_heap::g_heaps[i]->vm_heap;
        //destroy pure GC stuff
        gc_heap::destroy_gc_heap (gc_heap::g_heaps[i]);
    }
#else
    gc_heap::destroy_gc_heap (pGenGCHeap);

#endif //MULTIPLE_HEAPS
    gc_heap::shutdown_gc();

    return S_OK;
}

// Wait until a garbage collection is complete
// returns NOERROR if wait was OK, other error code if failure.
// WARNING: This will not undo the must complete state. If you are
// in a must complete when you call this, you'd better know what you're
// doing.

#ifdef FEATURE_PREMORTEM_FINALIZATION
static
HRESULT AllocateCFinalize(CFinalize **pCFinalize)
{
    *pCFinalize = new (nothrow) CFinalize();
    if (*pCFinalize == NULL || !(*pCFinalize)->Initialize())
        return E_OUTOFMEMORY;

    return S_OK;
}
#endif // FEATURE_PREMORTEM_FINALIZATION

// init the instance heap
HRESULT GCHeap::Init(size_t hn)
{
    HRESULT hres = S_OK;

#ifdef MULTIPLE_HEAPS
    if ((pGenGCHeap = gc_heap::make_gc_heap(this, (int)hn)) == 0)
        hres = E_OUTOFMEMORY;
#else
    UNREFERENCED_PARAMETER(hn);
    if (!gc_heap::make_gc_heap())
        hres = E_OUTOFMEMORY;
#endif //MULTIPLE_HEAPS

    // hres is E_OUTOFMEMORY if creating the heap failed, S_OK otherwise.
    return hres;
}

//System wide initialization
HRESULT GCHeap::Initialize()
{
    HRESULT hr = S_OK;

    qpf = GCToOSInterface::QueryPerformanceFrequency();
    start_time = GetHighPrecisionTimeStamp();

    g_gc_pFreeObjectMethodTable = GCToEEInterface::GetFreeObjectMethodTable();
    g_num_processors = GCToOSInterface::GetTotalProcessorCount();
    assert(g_num_processors != 0);

//Initialize the static members.
#ifdef TRACE_GC
    GcDuration = 0;
    CreatedObjectCount = 0;
#endif //TRACE_GC

    bool is_restricted;
    gc_heap::total_physical_mem = GCToOSInterface::GetPhysicalMemoryLimit (&is_restricted);

#ifdef BIT64
    gc_heap::heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();

    if (!(gc_heap::heap_hard_limit))
    {
        uint32_t percent_of_mem = (uint32_t)GCConfig::GetGCHeapHardLimitPercent();
        if ((percent_of_mem > 0) && (percent_of_mem < 100))
        {
            gc_heap::heap_hard_limit = (size_t)(gc_heap::total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);
        }
    }
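    // Illustrative example: with 16 GB of physical memory and GCHeapHardLimitPercent=75,
    // the hard limit becomes 16 GB * 75 / 100 = 12 GB.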

    // If a hard limit is specified, the user is saying: even if the process is already
    // running in a container, use this limit for the GC heap.
    if (!(gc_heap::heap_hard_limit))
    {
        if (is_restricted)
        {
            uint64_t physical_mem_for_gc = gc_heap::total_physical_mem * (uint64_t)75 / (uint64_t)100;
            //printf ("returned physical mem %I64d, setting it to max (75%%: %I64d, 20mb)\n",
            //    gc_heap::total_physical_mem, physical_mem_for_gc);
            gc_heap::heap_hard_limit = (size_t)max ((20 * 1024 * 1024), physical_mem_for_gc);
        }
    }

    //printf ("heap_hard_limit is %Id, total physical mem: %Id, %s restricted\n",
    //    gc_heap::heap_hard_limit, gc_heap::total_physical_mem, (is_restricted ? "is" : "is not"));
#endif //BIT64

    uint32_t nhp = 1;
    uint32_t nhp_from_config = 0;

#ifdef MULTIPLE_HEAPS
    AffinitySet config_affinity_set;
    GCConfigStringHolder cpu_index_ranges_holder(GCConfig::GetGCHeapAffinitizeRanges());

    if (!ParseGCHeapAffinitizeRanges(cpu_index_ranges_holder.Get(), &config_affinity_set))
    {
        return CLR_E_GC_BAD_AFFINITY_CONFIG_FORMAT;
    }

    uintptr_t config_affinity_mask = static_cast<uintptr_t>(GCConfig::GetGCHeapAffinitizeMask());
    const AffinitySet* process_affinity_set = GCToOSInterface::SetGCThreadsAffinitySet(config_affinity_mask, &config_affinity_set);

    if (process_affinity_set->IsEmpty())
    {
        return CLR_E_GC_BAD_AFFINITY_CONFIG;
    }

    if ((cpu_index_ranges_holder.Get() != nullptr)
#ifdef PLATFORM_WINDOWS
        || (config_affinity_mask != 0)
#endif
    )
    {
        affinity_config_specified_p = true;
    }

    nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());

    g_num_active_processors = GCToOSInterface::GetCurrentProcessCpuCount();

    if (nhp_from_config)
    {
        // Even when the user specifies a heap count, it should not be more
        // than the number of procs this process can use.
        nhp_from_config = min (nhp_from_config, g_num_active_processors);
    }

    nhp = ((nhp_from_config == 0) ? g_num_active_processors : nhp_from_config);

    nhp = min (nhp, MAX_SUPPORTED_CPUS);
#ifndef FEATURE_REDHAWK
    gc_heap::gc_thread_no_affinitize_p = (gc_heap::heap_hard_limit ? !affinity_config_specified_p : (GCConfig::GetNoAffinitize() != 0));

    if (!(gc_heap::gc_thread_no_affinitize_p))
    {
        uint32_t num_affinitized_processors = (uint32_t)process_affinity_set->Count();

        if (num_affinitized_processors != 0)
        {
            nhp = min(nhp, num_affinitized_processors);
        }
#ifndef PLATFORM_WINDOWS
        // Limit the GC heaps to the number of processors available in the system.
        nhp = min (nhp, GCToOSInterface::GetTotalProcessorCount());
#endif // !PLATFORM_WINDOWS
    }
#endif //!FEATURE_REDHAWK
#endif //MULTIPLE_HEAPS

    size_t seg_size = 0;
    size_t large_seg_size = 0;

    if (gc_heap::heap_hard_limit)
    {
        gc_heap::use_large_pages_p = GCConfig::GetGCLargePages();
        seg_size = gc_heap::get_segment_size_hard_limit (&nhp, (nhp_from_config == 0));
        gc_heap::soh_segment_size = seg_size;
        large_seg_size = gc_heap::use_large_pages_p ? seg_size : seg_size * 2;

        if (gc_heap::use_large_pages_p)
            gc_heap::min_segment_size = min_segment_size_hard_limit;
    }
    else
    {
        seg_size = get_valid_segment_size();
        gc_heap::soh_segment_size = seg_size;
        large_seg_size = get_valid_segment_size (TRUE);
    }

    dprintf (1, ("%d heaps, soh seg size: %Id mb, loh: %Id mb\n",
        nhp,
        (seg_size / (size_t)1024 / 1024),
        (large_seg_size / 1024 / 1024)));

    gc_heap::min_loh_segment_size = large_seg_size;
#ifdef SEG_MAPPING_TABLE
    if (gc_heap::min_segment_size == 0)
    {
        gc_heap::min_segment_size = min (seg_size, large_seg_size);
    }
    gc_heap::min_segment_size_shr = index_of_highest_set_bit (gc_heap::min_segment_size);
#endif //SEG_MAPPING_TABLE

#ifdef MULTIPLE_HEAPS
    gc_heap::n_heaps = nhp;
    hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/, nhp);
#else
    hr = gc_heap::initialize_gc (seg_size, large_seg_size /*LHEAP_ALLOC*/);
#endif //MULTIPLE_HEAPS

    if (hr != S_OK)
        return hr;

    gc_heap::mem_one_percent = gc_heap::total_physical_mem / 100;
#ifndef MULTIPLE_HEAPS
    gc_heap::mem_one_percent /= g_num_processors;
#endif //!MULTIPLE_HEAPS

    uint32_t highmem_th_from_config = (uint32_t)GCConfig::GetGCHighMemPercent();
    if (highmem_th_from_config)
    {
        gc_heap::high_memory_load_th = min (99, highmem_th_from_config);
        gc_heap::v_high_memory_load_th = min (99, (highmem_th_from_config + 7));
    }
    else
    {
        // We should only use this if we are in the "many process" mode, which really is only applicable
        // to very powerful machines - before that's implemented, I am temporarily enabling this only for
        // machines with 80GB+ of memory.
        // For now I am using an estimate to calculate these numbers, but they should really be obtained
        // programmatically going forward.
        // I am assuming 47 processes using WKS GC and 3 using SVR GC; I am assuming 3 in part because
        // the "very high memory load" threshold is 97%.
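        // Illustrative example: on a machine with 128GB of memory and 16 procs,
        // adjusted_available_mem_th = 3 + (int)(47.0f / 16) = 5, so available_mem_th
        // becomes min (10, 5) = 5 and high_memory_load_th is set to 95 (v_high stays 97).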
        int available_mem_th = 10;
        if (gc_heap::total_physical_mem >= ((uint64_t)80 * 1024 * 1024 * 1024))
        {
            int adjusted_available_mem_th = 3 + (int)((float)47 / (float)(GCToOSInterface::GetTotalProcessorCount()));
            available_mem_th = min (available_mem_th, adjusted_available_mem_th);
        }

        gc_heap::high_memory_load_th = 100 - available_mem_th;
        gc_heap::v_high_memory_load_th = 97;
    }

    gc_heap::m_high_memory_load_th = min ((gc_heap::high_memory_load_th + 5), gc_heap::v_high_memory_load_th);

    gc_heap::pm_stress_on = (GCConfig::GetGCProvModeStress() != 0);

#if defined(BIT64)
    gc_heap::youngest_gen_desired_th = gc_heap::mem_one_percent;
#endif // BIT64

    WaitForGCEvent = new (nothrow) GCEvent;

    if (!WaitForGCEvent)
    {
        return E_OUTOFMEMORY;
    }

    if (!WaitForGCEvent->CreateManualEventNoThrow(TRUE))
    {
        return E_FAIL;
    }

#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS)
    if (GCStress<cfg_any>::IsEnabled())  {
        for(int i = 0; i < GCHeap::NUM_HEAP_STRESS_OBJS; i++)
        {
            m_StressObjs[i] = CreateGlobalHandle(0);
        }
        m_CurStressObj = 0;
    }
#endif //STRESS_HEAP && !MULTIPLE_HEAPS
#endif // FEATURE_REDHAWK

    initGCShadow();         // If we are debugging write barriers, initialize heap shadow

#ifdef MULTIPLE_HEAPS

    for (unsigned i = 0; i < nhp; i++)
    {
        GCHeap* Hp = new (nothrow) GCHeap();
        if (!Hp)
            return E_OUTOFMEMORY;

        if ((hr = Hp->Init (i))!= S_OK)
        {
            return hr;
        }
    }

    heap_select::init_numa_node_to_heap_map (nhp);

    // If we have more active processors than heaps we still want to initialize some of the
    // mapping for the rest of the active processors because user threads can still run on
    // them which means it's important to know their numa nodes and map them to a reasonable
    // heap, i.e., we wouldn't want to have all such procs go to heap 0.
    if (g_num_active_processors > nhp)
        heap_select::distribute_other_procs();

    gc_heap* hp = gc_heap::g_heaps[0];

    dynamic_data* gen0_dd = hp->dynamic_data_of (0);
    gc_heap::min_gen0_balance_delta = (dd_min_size (gen0_dd) >> 3);

#ifdef HEAP_BALANCE_INSTRUMENTATION
    cpu_group_enabled_p = GCToOSInterface::CanEnableGCCPUGroups();

    if (!GCToOSInterface::GetNumaInfo (&total_numa_nodes_on_machine, &procs_per_numa_node))
    {
        total_numa_nodes_on_machine = 1;

        // Note that if we are in cpu groups we need to take the way proc index is calculated
        // into consideration. It would mean we have more than 64 procs on one numa node -
        // this is mostly for testing (if we want to simulate no numa on a numa system).
        // see vm\gcenv.os.cpp GroupProcNo implementation.
        if (GCToOSInterface::GetCPUGroupInfo (&total_cpu_groups_on_machine, &procs_per_cpu_group))
            procs_per_numa_node = procs_per_cpu_group + ((total_cpu_groups_on_machine - 1) << 6);
        else
            procs_per_numa_node = g_num_processors;
    }
    hb_info_numa_nodes = new (nothrow) heap_balance_info_numa[total_numa_nodes_on_machine];
    dprintf (HEAP_BALANCE_LOG, ("total: %d, numa: %d", g_num_processors, total_numa_nodes_on_machine));

    int hb_info_size_per_proc = sizeof (heap_balance_info_proc);

    for (int numa_node_index = 0; numa_node_index < total_numa_nodes_on_machine; numa_node_index++)
    {
        int hb_info_size_per_node = hb_info_size_per_proc * procs_per_numa_node;
        uint8_t* numa_mem = (uint8_t*)GCToOSInterface::VirtualReserve (hb_info_size_per_node, 0, 0, numa_node_index);
        if (!numa_mem)
            return E_FAIL;
        if (!GCToOSInterface::VirtualCommit (numa_mem, hb_info_size_per_node, numa_node_index))
            return E_FAIL;

        heap_balance_info_proc* hb_info_procs = (heap_balance_info_proc*)numa_mem;
        hb_info_numa_nodes[numa_node_index].hb_info_procs = hb_info_procs;

        for (int proc_index = 0; proc_index < (int)procs_per_numa_node; proc_index++)
        {
            heap_balance_info_proc* hb_info_proc = &hb_info_procs[proc_index];
            hb_info_proc->count = default_max_hb_heap_balance_info;
            hb_info_proc->index = 0;
        }
    }
#endif //HEAP_BALANCE_INSTRUMENTATION
#else
    hr = Init (0);
#endif //MULTIPLE_HEAPS

    if (hr == S_OK)
    {
        GCScan::GcRuntimeStructuresValid (TRUE);

        GCToEEInterface::DiagUpdateGenerationBounds();
    }

    return hr;
}

////
// GC callback functions
bool GCHeap::IsPromoted(Object* object)
{
#ifdef _DEBUG
    if (object)
    {
        ((CObjectHeader*)object)->Validate();
    }
#endif //_DEBUG

    uint8_t* o = (uint8_t*)object;
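    // An object outside the range being collected is treated as promoted (it survives by
    // definition); inside the range, promotion is indicated by the mark bit, or by the
    // background mark bit when the current GC is a background GC.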

    if (gc_heap::settings.condemned_generation == max_generation)
    {
#ifdef MULTIPLE_HEAPS
        gc_heap* hp = gc_heap::g_heaps[0];
#else
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
        if (gc_heap::settings.concurrent)
        {
            bool is_marked = (!((o < hp->background_saved_highest_address) && (o >= hp->background_saved_lowest_address))||
                            hp->background_marked (o));
            return is_marked;
        }
        else
#endif //BACKGROUND_GC
        {
            return (!((o < hp->highest_address) && (o >= hp->lowest_address))
                    || hp->is_mark_set (o));
        }
    }
    else
    {
        gc_heap* hp = gc_heap::heap_of (o);
        return (!((o < hp->gc_high) && (o >= hp->gc_low))
                || hp->is_mark_set (o));
    }
}

size_t GCHeap::GetPromotedBytes(int heap_index)
{
#ifdef BACKGROUND_GC
    if (gc_heap::settings.concurrent)
    {
        return gc_heap::bpromoted_bytes (heap_index);
    }
    else
#endif //BACKGROUND_GC
    {
        return gc_heap::promoted_bytes (heap_index);
    }
}

void GCHeap::SetYieldProcessorScalingFactor (float scalingFactor)
{
    assert (yp_spin_count_unit != 0);
    int saved_yp_spin_count_unit = yp_spin_count_unit;
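    // The scaling is relative to a baseline factor of 9 (a scalingFactor of 9 leaves the
    // unit unchanged); e.g. a scalingFactor of 4.5 halves the unit and 18 doubles it.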
    yp_spin_count_unit = (int)((float)yp_spin_count_unit * scalingFactor / (float)9);

    // It's very suspicious if it becomes 0
    if (yp_spin_count_unit == 0)
    {
        yp_spin_count_unit = saved_yp_spin_count_unit;
    }
}

unsigned int GCHeap::WhichGeneration (Object* object)
{
    gc_heap* hp = gc_heap::heap_of ((uint8_t*)object);
    unsigned int g = hp->object_gennum ((uint8_t*)object);
    dprintf (3, ("%Ix is in gen %d", (size_t)object, g));
    return g;
}

bool GCHeap::IsEphemeral (Object* object)
{
    uint8_t* o = (uint8_t*)object;
    gc_heap* hp = gc_heap::heap_of (o);
    return !!hp->ephemeral_pointer_p (o);
}

// Return NULL if we can't find the next object. When the EE is not suspended,
// the result is not accurate: if the input arg is in gen0, the function could
// return zeroed out memory as next object
Object * GCHeap::NextObj (Object * object)
{
#ifdef VERIFY_HEAP
    uint8_t* o = (uint8_t*)object;

#ifndef FEATURE_BASICFREEZE
    if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address)))
    {
        return NULL;
    }
#endif //!FEATURE_BASICFREEZE

    heap_segment * hs = gc_heap::find_segment (o, FALSE);
    if (!hs)
    {
        return NULL;
    }

    BOOL large_object_p = heap_segment_loh_p (hs);
    if (large_object_p)
        return NULL; //could be racing with another core allocating.
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = heap_segment_heap (hs);
#else //MULTIPLE_HEAPS
    gc_heap* hp = 0;
#endif //MULTIPLE_HEAPS
    unsigned int g = hp->object_gennum ((uint8_t*)object);
    if ((g == 0) && hp->settings.demotion)
        return NULL;//could be racing with another core allocating.
    int align_const = get_alignment_constant (!large_object_p);
    uint8_t* nextobj = o + Align (size (o), align_const);
    if (nextobj <= o) // either overflow or 0 sized object.
    {
        return NULL;
    }

    if ((nextobj < heap_segment_mem(hs)) ||
        (nextobj >= heap_segment_allocated(hs) && hs != hp->ephemeral_heap_segment) ||
        (nextobj >= hp->alloc_allocated))
    {
        return NULL;
    }

    return (Object *)nextobj;
#else
    return nullptr;
#endif // VERIFY_HEAP
}

// returns TRUE if the pointer is in one of the GC heaps.
bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
{
    // removed STATIC_CONTRACT_CAN_TAKE_LOCK here because find_segment
    // no longer calls GCEvent::Wait which eventually takes a lock.

    uint8_t* object = (uint8_t*) vpObject;
#ifndef FEATURE_BASICFREEZE
    if (!((object < g_gc_highest_address) && (object >= g_gc_lowest_address)))
        return FALSE;
#endif //!FEATURE_BASICFREEZE

    heap_segment * hs = gc_heap::find_segment (object, small_heap_only);
    return !!hs;
}

#ifdef STRESS_PINNING
static int n_promote = 0;
#endif //STRESS_PINNING
// promote an object
void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
{
    THREAD_NUMBER_FROM_CONTEXT;
#ifndef MULTIPLE_HEAPS
    const int thread = 0;
#endif //!MULTIPLE_HEAPS

    uint8_t* o = (uint8_t*)*ppObject;

    if (o == 0)
        return;

#ifdef DEBUG_DestroyedHandleValue
    // we can race with destroy handle during concurrent scan
    if (o == (uint8_t*)DEBUG_DestroyedHandleValue)
        return;
#endif //DEBUG_DestroyedHandleValue

    HEAP_FROM_THREAD;

    gc_heap* hp = gc_heap::heap_of (o);

    dprintf (3, ("Promote %Ix", (size_t)o));

#ifdef INTERIOR_POINTERS
    if (flags & GC_CALL_INTERIOR)
    {
        if ((o < hp->gc_low) || (o >= hp->gc_high))
        {
            return;
        }
        if ( (o = hp->find_object (o, hp->gc_low)) == 0)
        {
            return;
        }

    }
#endif //INTERIOR_POINTERS

#ifdef FEATURE_CONSERVATIVE_GC
    // For conservative GC, a value on the stack may point to the middle of a free object.
    // In this case, we don't need to promote the pointer.
    if (GCConfig::GetConservativeGC()
        && ((CObjectHeader*)o)->IsFree())
    {
        return;
    }
#endif

#ifdef _DEBUG
    ((CObjectHeader*)o)->ValidatePromote(sc, flags);
#else
    UNREFERENCED_PARAMETER(sc);
#endif //_DEBUG

    if (flags & GC_CALL_PINNED)
        hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);

#ifdef STRESS_PINNING
    if ((++n_promote % 20) == 1)
        hp->pin_object (o, (uint8_t**) ppObject, hp->gc_low, hp->gc_high);
#endif //STRESS_PINNING

    if ((o >= hp->gc_low) && (o < hp->gc_high))
    {
        hpt->mark_object_simple (&o THREAD_NUMBER_ARG);
    }

    STRESS_LOG_ROOT_PROMOTE(ppObject, o, o ? header(o)->GetMethodTable() : NULL);
}

void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
                       uint32_t flags)
{
    UNREFERENCED_PARAMETER(sc);

    uint8_t* object = (uint8_t*)(Object*)(*ppObject);

    THREAD_NUMBER_FROM_CONTEXT;

    //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject));
    dprintf (3, ("R: %Ix", (size_t)ppObject));

    if (object == 0)
        return;

    gc_heap* hp = gc_heap::heap_of (object);

#ifdef _DEBUG
    if (!(flags & GC_CALL_INTERIOR))
    {
        // We cannot validate this object if it's in the condemned gen because it could
        // be one of the objects that were overwritten by an artificial gap due to a pinned plug.
        if (!((object >= hp->gc_low) && (object < hp->gc_high)))
        {
            ((CObjectHeader*)object)->Validate(FALSE);
        }
    }
#endif //_DEBUG

    dprintf (3, ("Relocate %Ix\n", (size_t)object));

    uint8_t* pheader;
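    // When LOH compaction is active, an interior pointer may refer into a large object
    // that is about to move: find the containing object's start, relocate that address,
    // then re-apply the original interior offset to produce the updated root.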

    if ((flags & GC_CALL_INTERIOR) && gc_heap::settings.loh_compaction)
    {
        if (!((object >= hp->gc_low) && (object < hp->gc_high)))
        {
            return;
        }

        if (gc_heap::loh_object_p (object))
        {
            pheader = hp->find_object (object, 0);
            if (pheader == 0)
            {
                return;
            }

            ptrdiff_t ref_offset = object - pheader;
            hp->relocate_address(&pheader THREAD_NUMBER_ARG);
            *ppObject = (Object*)(pheader + ref_offset);
            return;
        }
    }

    {
        pheader = object;
        hp->relocate_address(&pheader THREAD_NUMBER_ARG);
        *ppObject = (Object*)pheader;
    }

    STRESS_LOG_ROOT_RELOCATE(ppObject, object, pheader, ((!(flags & GC_CALL_INTERIOR)) ? ((Object*)object)->GetGCSafeMethodTable() : 0));
}

/*static*/ bool GCHeap::IsObjectInFixedHeap(Object *pObj)
{
    // For now we simply look at the size of the object to determine if it is in the
    // fixed heap or not. If the bit indicating this gets set at some point
    // we should key off that instead.
    return size( pObj ) >= loh_size_threshold;
}

#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP

void StressHeapDummy ();

static int32_t GCStressStartCount = -1;
static int32_t GCStressCurCount = 0;
static int32_t GCStressStartAtJit = -1;

// the maximum number of foreground GCs we'll induce during one BGC
// (this number does not include "naturally" occurring GCs).
static int32_t GCStressMaxFGCsPerBGC = -1;

// The CLRRandom implementation can produce FPU exceptions if
// the test/application run by the CLR has enabled any FPU exceptions.
// We want to avoid any unexpected exception coming from the stress
// infrastructure, so CLRRandom is not an option.
// The code below is a replica of the CRT rand() implementation.
// Using CRT rand() directly is not an option because we would interfere with a user
// application that may also use it.
int StressRNG(int iMaxValue)
{
    static BOOL bisRandInit = FALSE;
    static int lHoldrand = 1L;

    if (!bisRandInit)
    {
        lHoldrand = (int)time(NULL);
        bisRandInit = TRUE;
    }
    int randValue = (((lHoldrand = lHoldrand * 214013L + 2531011L) >> 16) & 0x7fff);
    return randValue % iMaxValue;
}
#endif // STRESS_HEAP
#endif // !FEATURE_REDHAWK

// Free up an object so that things will move and then do a GC.
// Returns TRUE if a GC actually happens, otherwise FALSE.
bool GCHeap::StressHeap(gc_alloc_context * context)
{
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
    alloc_context* acontext = static_cast<alloc_context*>(context);
    assert(context != nullptr);

    // if GC stress was dynamically disabled during this run we return FALSE
    if (!GCStressPolicy::IsEnabled())
        return FALSE;

#ifdef _DEBUG
    if (g_pConfig->FastGCStressLevel() && !GCToEEInterface::GetThread()->StressHeapIsEnabled()) {
        return FALSE;
    }

#endif //_DEBUG

    if ((g_pConfig->GetGCStressLevel() & EEConfig::GCSTRESS_UNIQUE)
#ifdef _DEBUG
        || g_pConfig->FastGCStressLevel() > 1
#endif //_DEBUG
        ) {
        if (!Thread::UniqueStack(&acontext)) {
            return FALSE;
        }
    }

#ifdef BACKGROUND_GC
        // don't trigger a GC from the GC threads but still trigger GCs from user threads.
        if (GCToEEInterface::WasCurrentThreadCreatedByGC())
        {
            return FALSE;
        }
#endif //BACKGROUND_GC

        if (GCStressStartAtJit == -1 || GCStressStartCount == -1)
        {
            GCStressStartCount = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_GCStressStart);
            GCStressStartAtJit = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressStartAtJit);
        }

        if (GCStressMaxFGCsPerBGC == -1)
        {
            GCStressMaxFGCsPerBGC = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GCStressMaxFGCsPerBGC);
            if (g_pConfig->IsGCStressMix() && GCStressMaxFGCsPerBGC == -1)
                GCStressMaxFGCsPerBGC = 6;
        }

#ifdef _DEBUG
        if (g_JitCount < GCStressStartAtJit)
            return FALSE;
#endif //_DEBUG

        // Allow programmer to skip the first N Stress GCs so that you can
        // get to the interesting ones faster.
        Interlocked::Increment(&GCStressCurCount);
        if (GCStressCurCount < GCStressStartCount)
            return FALSE;

        // throttle the number of stress-induced GCs by a factor given by GCStressStep
        if ((GCStressCurCount % g_pConfig->GetGCStressStep()) != 0)
        {
            return FALSE;
        }

#ifdef BACKGROUND_GC
        if (IsConcurrentGCEnabled() && IsConcurrentGCInProgress())
        {
            // allow a maximum number of stress induced FGCs during one BGC
            if (gc_stress_fgcs_in_bgc >= GCStressMaxFGCsPerBGC)
                return FALSE;
            ++gc_stress_fgcs_in_bgc;
        }
#endif // BACKGROUND_GC

    if (g_pStringClass == 0)
    {
        // If the String class has not been loaded, don't do any stressing. This should
        // be kept to a minimum to get as complete coverage as possible.
        _ASSERTE(g_fEEInit);
        return FALSE;
    }

#ifndef MULTIPLE_HEAPS
    static int32_t OneAtATime = -1;

    // Only bother with this if the stress level is big enough and if nobody else is
    // doing it right now.  Note that some callers are inside the AllocLock and are
    // guaranteed synchronized.  But others are using AllocationContexts and have no
    // particular synchronization.
    //
    // For this latter case, we want a very high-speed way of limiting this to one
    // at a time.  A secondary advantage is that we release part of our StressObjs
    // buffer sparingly but just as effectively.

    if (Interlocked::Increment(&OneAtATime) == 0 &&
        !TrackAllocations()) // Messing with object sizes can confuse the profiler (see ICorProfilerInfo::GetObjectSize)
    {
        StringObject* str;

        // If the current string is used up
        if (HndFetchHandle(m_StressObjs[m_CurStressObj]) == 0)
        {
            // Populate handles with strings
            int i = m_CurStressObj;
            while(HndFetchHandle(m_StressObjs[i]) == 0)
            {
                _ASSERTE(m_StressObjs[i] != 0);
                unsigned strLen = ((unsigned)loh_size_threshold - 32) / sizeof(WCHAR);
                unsigned strSize = PtrAlign(StringObject::GetSize(strLen));

                // update the cached type handle before allocating
                SetTypeHandleOnThreadForAlloc(TypeHandle(g_pStringClass));
                str = (StringObject*) pGenGCHeap->allocate (strSize, acontext, /*flags*/ 0);
                if (str)
                {
                    str->SetMethodTable (g_pStringClass);
                    str->SetStringLength (strLen);
                    HndAssignHandle(m_StressObjs[i], ObjectToOBJECTREF(str));
                }
                i = (i + 1) % NUM_HEAP_STRESS_OBJS;
                if (i == m_CurStressObj) break;
            }

            // advance the current handle to the next string
            m_CurStressObj = (m_CurStressObj + 1) % NUM_HEAP_STRESS_OBJS;
        }

        // Get the current string
        str = (StringObject*) OBJECTREFToObject(HndFetchHandle(m_StressObjs[m_CurStressObj]));
        if (str)
        {
            // Chop off the end of the string and form a new object out of it.
            // This will 'free' an object at the beginning of the heap, which will
            // force data movement.  Note that we can only do this so many times
            // before we have to move on to the next string.
            unsigned sizeOfNewObj = (unsigned)Align(min_obj_size * 31);
            if (str->GetStringLength() > sizeOfNewObj / sizeof(WCHAR))
            {
                unsigned sizeToNextObj = (unsigned)Align(size(str));
                uint8_t* freeObj = ((uint8_t*) str) + sizeToNextObj - sizeOfNewObj;
                pGenGCHeap->make_unused_array (freeObj, sizeOfNewObj);
                str->SetStringLength(str->GetStringLength() - (sizeOfNewObj / sizeof(WCHAR)));
            }
            else
            {
                // Let the string itself become garbage;
                // it will be reallocated next time around.
                HndAssignHandle(m_StressObjs[m_CurStressObj], 0);
            }
        }
    }
    Interlocked::Decrement(&OneAtATime);
#endif // !MULTIPLE_HEAPS
    if (IsConcurrentGCEnabled())
    {
        int rgen = StressRNG(10);

        // gen0:gen1:gen2 distribution: 40:40:20
        if (rgen >= 8)
            rgen = 2;
        else if (rgen >= 4)
            rgen = 1;
        else
            rgen = 0;

        GarbageCollectTry (rgen, FALSE, collection_gcstress);
    }
    else
    {
        GarbageCollect(max_generation, FALSE, collection_gcstress);
    }

    return TRUE;
#else
    UNREFERENCED_PARAMETER(context);
    return FALSE;
#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
}


#ifdef FEATURE_PREMORTEM_FINALIZATION
#define REGISTER_FOR_FINALIZATION(_object, _size) \
    hp->finalize_queue->RegisterForFinalization (0, (_object), (_size))
#else // FEATURE_PREMORTEM_FINALIZATION
#define REGISTER_FOR_FINALIZATION(_object, _size) true
#endif // FEATURE_PREMORTEM_FINALIZATION

#define CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(_object, _size, _register) do {  \
    if ((_object) == NULL || ((_register) && !REGISTER_FOR_FINALIZATION(_object, _size)))   \
    {                                                                                       \
        STRESS_LOG_OOM_STACK(_size);                                                        \
        return NULL;                                                                        \
    }                                                                                       \
} while (false)

//
// Small Object Allocator
//
//
// Allocate small object with an alignment requirement of 8-bytes.
Object*
GCHeap::AllocAlign8(gc_alloc_context* ctx, size_t size, uint32_t flags )
{
#ifdef FEATURE_64BIT_ALIGNMENT
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;

    alloc_context* acontext = static_cast<alloc_context*>(ctx);

#ifdef MULTIPLE_HEAPS
    if (acontext->get_alloc_heap() == 0)
    {
        AssignHeap (acontext);
        assert (acontext->get_alloc_heap());
    }

    gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    return AllocAlign8Common(hp, acontext, size, flags);
#else
    UNREFERENCED_PARAMETER(ctx);
    UNREFERENCED_PARAMETER(size);
    UNREFERENCED_PARAMETER(flags);
    assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
    return nullptr;
#endif  //FEATURE_64BIT_ALIGNMENT
}

// Common code used by both variants of AllocAlign8 above.
Object*
GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint32_t flags)
{
#ifdef FEATURE_64BIT_ALIGNMENT
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;

    gc_heap* hp = (gc_heap*)_hp;

    TRIGGERSGC();

    Object* newAlloc = NULL;

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    AllocStart = GetCycleCount32();
    unsigned finish;
#elif defined(ENABLE_INSTRUMENTATION)
    unsigned AllocStart = GetInstLogTime();
    unsigned finish;
#endif //COUNT_CYCLES
#endif //TRACE_GC

    if (size < loh_size_threshold)
    {
#ifdef TRACE_GC
        AllocSmallCount++;
#endif //TRACE_GC

        // Depending on where in the object the payload requiring 8-byte alignment resides we might have to
        // align the object header on an 8-byte boundary or midway between two such boundaries. The unaligned
        // case is indicated to the GC via the GC_ALLOC_ALIGN8_BIAS flag.
        size_t desiredAlignment = (flags & GC_ALLOC_ALIGN8_BIAS) ? 4 : 0;
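        // Illustrative example (32-bit layout): a payload that needs 8-byte alignment and
        // sits at offset 4 from the object header (the GC_ALLOC_ALIGN8_BIAS case) wants
        // the header at an address that is 4 mod 8; without the bias the header itself
        // must sit on an 8-byte boundary (desiredAlignment == 0).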

        // Retrieve the address of the next allocation from the context (note that we're inside the alloc
        // lock at this point).
        uint8_t*  result = acontext->alloc_ptr;

        // Will an allocation at this point yield the correct alignment and fit into the remainder of the
        // context?
        if ((((size_t)result & 7) == desiredAlignment) && ((result + size) <= acontext->alloc_limit))
        {
            // Yes, we can just go ahead and make the allocation.
            newAlloc = (Object*) hp->allocate (size, acontext, flags);
            ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
        }
        else
        {
            // No, either the next available address is not aligned in the way we require it or there's
            // not enough space to allocate an object of the required size. In both cases we allocate a
            // padding object (marked as a free object). This object's size is such that it will reverse
            // the alignment of the next header (asserted below).
            //
            // We allocate both together then decide based on the result whether we'll format the space as
            // free object + real object or real object + free object.
            ASSERT((Align(min_obj_size) & 7) == 4);
            CObjectHeader *freeobj = (CObjectHeader*) hp->allocate (Align(size) + Align(min_obj_size), acontext, flags);
            if (freeobj)
            {
                if (((size_t)freeobj & 7) == desiredAlignment)
                {
                    // New allocation has desired alignment, return this one and place the free object at the
                    // end of the allocated space.
                    newAlloc = (Object*)freeobj;
                    freeobj = (CObjectHeader*)((uint8_t*)freeobj + Align(size));
                }
                else
                {
                    // New allocation is still mis-aligned, format the initial space as a free object and the
                    // rest of the space should be correctly aligned for the real object.
                    newAlloc = (Object*)((uint8_t*)freeobj + Align(min_obj_size));
                    ASSERT(((size_t)newAlloc & 7) == desiredAlignment);
                    if (flags & GC_ALLOC_ZEROING_OPTIONAL)
                    {
                        // clean the syncblock of the aligned object.
                        *(((PTR_PTR)newAlloc)-1) = 0;
                    }
                }
                freeobj->SetFree(min_obj_size);
            }
        }
    }
    else
    {
        // The LOH always guarantees at least 8-byte alignment, regardless of platform. Moreover it doesn't
        // support mis-aligned object headers so we can't support biased headers as above. Luckily for us
        // we've managed to arrange things so the only case where we see a bias is for boxed value types and
        // these can never get large enough to be allocated on the LOH.
        ASSERT(65536 < loh_size_threshold);
        ASSERT((flags & GC_ALLOC_ALIGN8_BIAS) == 0);

        alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));

        newAlloc = (Object*) hp->allocate_large_object (size, flags, acontext->alloc_bytes_loh);
        ASSERT(((size_t)newAlloc & 7) == 0);
    }

    CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    finish = GetCycleCount32();
#elif defined(ENABLE_INSTRUMENTATION)
    finish = GetInstLogTime();
#endif //COUNT_CYCLES
    AllocDuration += finish - AllocStart;
    AllocCount++;
#endif //TRACE_GC
    return newAlloc;
#else
    UNREFERENCED_PARAMETER(_hp);
    UNREFERENCED_PARAMETER(acontext);
    UNREFERENCED_PARAMETER(size);
    UNREFERENCED_PARAMETER(flags);
    assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
    return nullptr;
#endif // FEATURE_64BIT_ALIGNMENT
}

Object *
GCHeap::AllocLHeap( size_t size, uint32_t flags REQD_ALIGN_DCL)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;

    TRIGGERSGC();

    Object* newAlloc = NULL;

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    AllocStart = GetCycleCount32();
    unsigned finish;
#elif defined(ENABLE_INSTRUMENTATION)
    unsigned AllocStart = GetInstLogTime();
    unsigned finish;
#endif //COUNT_CYCLES
#endif //TRACE_GC

#ifdef MULTIPLE_HEAPS
    //take the first heap....
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#ifdef _PREFAST_
    // prefix complains about us dereferencing hp in wks build even though we only access static members
    // this way. not sure how to shut it up except for this ugly workaround:
    PREFIX_ASSUME(hp != NULL);
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS

    alloc_context* acontext = generation_alloc_context (hp->generation_of (max_generation+1));

    newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
#ifdef FEATURE_STRUCTALIGN
    newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
    CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    finish = GetCycleCount32();
#elif defined(ENABLE_INSTRUMENTATION)
    finish = GetInstLogTime();
    AllocDuration += finish - AllocStart;
    AllocCount++;
#endif //COUNT_CYCLES
#endif //TRACE_GC
    return newAlloc;
}

Object*
GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_DCL)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    } CONTRACTL_END;

    TRIGGERSGC();

    Object* newAlloc = NULL;
    alloc_context* acontext = static_cast<alloc_context*>(context);

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    AllocStart = GetCycleCount32();
    unsigned finish;
#elif defined(ENABLE_INSTRUMENTATION)
    unsigned AllocStart = GetInstLogTime();
    unsigned finish;
#endif //COUNT_CYCLES
#endif //TRACE_GC

#ifdef MULTIPLE_HEAPS
    if (acontext->get_alloc_heap() == 0)
    {
        AssignHeap (acontext);
        assert (acontext->get_alloc_heap());
    }
    gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
    gc_heap* hp = pGenGCHeap;
#ifdef _PREFAST_
    // prefix complains about us dereferencing hp in wks build even though we only access static members
    // this way. not sure how to shut it up except for this ugly workaround:
    PREFIX_ASSUME(hp != NULL);
#endif //_PREFAST_
#endif //MULTIPLE_HEAPS

    if (size < loh_size_threshold)
    {

#ifdef TRACE_GC
        AllocSmallCount++;
#endif //TRACE_GC
        newAlloc = (Object*) hp->allocate (size + ComputeMaxStructAlignPad(requiredAlignment), acontext, flags);
#ifdef FEATURE_STRUCTALIGN
        newAlloc = (Object*) hp->pad_for_alignment ((uint8_t*) newAlloc, requiredAlignment, size, acontext);
#endif // FEATURE_STRUCTALIGN
//        ASSERT (newAlloc);
    }
    else
    {
        newAlloc = (Object*) hp->allocate_large_object (size + ComputeMaxStructAlignPadLarge(requiredAlignment), flags, acontext->alloc_bytes_loh);
#ifdef FEATURE_STRUCTALIGN
        newAlloc = (Object*) hp->pad_for_alignment_large ((uint8_t*) newAlloc, requiredAlignment, size);
#endif // FEATURE_STRUCTALIGN
    }

    CHECK_ALLOC_AND_POSSIBLY_REGISTER_FOR_FINALIZATION(newAlloc, size, flags & GC_ALLOC_FINALIZE);

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    finish = GetCycleCount32();
#elif defined(ENABLE_INSTRUMENTATION)
    finish = GetInstLogTime();
#endif //COUNT_CYCLES
    AllocDuration += finish - AllocStart;
    AllocCount++;
#endif //TRACE_GC
    return newAlloc;
}

void
GCHeap::FixAllocContext (gc_alloc_context* context, void* arg, void *heap)
{
    alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS

    if (arg != 0)
        acontext->alloc_count = 0;

    uint8_t * alloc_ptr = acontext->alloc_ptr;

    if (!alloc_ptr)
        return;

    // The acontext->alloc_heap can be out of sync with the ptrs because
    // of heap re-assignment in allocate
    gc_heap* hp = gc_heap::heap_of (alloc_ptr);
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    if (heap == NULL || heap == hp)
    {
        hp->fix_allocation_context (acontext, ((arg != 0)? TRUE : FALSE),
                                    get_alignment_constant(TRUE));
    }
}

Object*
GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
{
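    // Map an interior pointer back to the start of the object containing it. When
    // fCollectedGenOnly is set, only the range currently being collected is searched;
    // pointers outside the selected range return NULL.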
    uint8_t *o = (uint8_t*)pInteriorPtr;

    gc_heap* hp = gc_heap::heap_of (o);

    uint8_t* lowest = (fCollectedGenOnly ? hp->gc_low : hp->lowest_address);
    uint8_t* highest = (fCollectedGenOnly ? hp->gc_high : hp->highest_address);

    if (o >= lowest && o < highest)
    {
        o = hp->find_object (o, lowest);
    }
    else
    {
        o = NULL;
    }

    return (Object *)o;
}
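// Decides whether an "optimized" (non-forced) collection is worthwhile for this generation:
// collect once the budget is exhausted (new_allocation has gone negative), or once the
// remaining budget falls below 30% of the desired budget (70% under low-memory pressure,
// so that low-memory-induced collections trigger earlier).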

BOOL should_collect_optimized (dynamic_data* dd, BOOL low_memory_p)
{
    if (dd_new_allocation (dd) < 0)
    {
        return TRUE;
    }

    if (((float)(dd_new_allocation (dd)) / (float)dd_desired_allocation (dd)) < (low_memory_p ? 0.7 : 0.3))
    {
        return TRUE;
    }

    return FALSE;
}

//----------------------------------------------------------------------------
// #GarbageCollector
//
//  API to ensure that a complete new garbage collection takes place
//
HRESULT
GCHeap::GarbageCollect (int generation, bool low_memory_p, int mode)
{
#if defined(BIT64)
    if (low_memory_p)
    {
        size_t total_allocated = 0;
        size_t total_desired = 0;
#ifdef MULTIPLE_HEAPS
        int hn = 0;
        for (hn = 0; hn < gc_heap::n_heaps; hn++)
        {
            gc_heap* hp = gc_heap::g_heaps [hn];
            total_desired += dd_desired_allocation (hp->dynamic_data_of (0));
            total_allocated += dd_desired_allocation (hp->dynamic_data_of (0))-
                dd_new_allocation (hp->dynamic_data_of (0));
        }
#else
        gc_heap* hp = pGenGCHeap;
        total_desired = dd_desired_allocation (hp->dynamic_data_of (0));
        total_allocated = dd_desired_allocation (hp->dynamic_data_of (0))-
            dd_new_allocation (hp->dynamic_data_of (0));
#endif //MULTIPLE_HEAPS

        if ((total_desired > gc_heap::mem_one_percent) && (total_allocated < gc_heap::mem_one_percent))
        {
            dprintf (2, ("Async low mem but we've only allocated %d (< 10%% of physical mem) out of %d, returning",
                         total_allocated, total_desired));

            return S_OK;
        }
    }
#endif // BIT64

#ifdef MULTIPLE_HEAPS
    gc_heap* hpt = gc_heap::g_heaps[0];
#else
    gc_heap* hpt = 0;
#endif //MULTIPLE_HEAPS

    generation = (generation < 0) ? max_generation : min (generation, max_generation);
    dynamic_data* dd = hpt->dynamic_data_of (generation);

#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        if ((mode == collection_optimized) || (mode & collection_non_blocking))
        {
            return S_OK;
        }
        if (mode & collection_blocking)
        {
            pGenGCHeap->background_gc_wait();
            if (mode & collection_optimized)
            {
                return S_OK;
            }
        }
    }
#endif //BACKGROUND_GC

    if (mode & collection_optimized)
    {
        if (pGenGCHeap->gc_started)
        {
            return S_OK;
        }
        else
        {
            BOOL should_collect = FALSE;
            BOOL should_check_loh = (generation == max_generation);
#ifdef MULTIPLE_HEAPS
            for (int i = 0; i < gc_heap::n_heaps; i++)
            {
                dynamic_data* dd1 = gc_heap::g_heaps [i]->dynamic_data_of (generation);
                dynamic_data* dd2 = (should_check_loh ?
                                     (gc_heap::g_heaps [i]->dynamic_data_of (max_generation + 1)) :
                                     0);

                if (should_collect_optimized (dd1, low_memory_p))
                {
                    should_collect = TRUE;
                    break;
                }
                if (dd2 && should_collect_optimized (dd2, low_memory_p))
                {
                    should_collect = TRUE;
                    break;
                }
            }
#else
            should_collect = should_collect_optimized (dd, low_memory_p);
            if (!should_collect && should_check_loh)
            {
                should_collect =
                    should_collect_optimized (hpt->dynamic_data_of (max_generation + 1), low_memory_p);
            }
#endif //MULTIPLE_HEAPS
            if (!should_collect)
            {
                return S_OK;
            }
        }
    }

    size_t CollectionCountAtEntry = dd_collection_count (dd);
    size_t BlockingCollectionCountAtEntry = gc_heap::full_gc_counts[gc_type_blocking];
    size_t CurrentCollectionCount = 0;

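    // Retry until this call has observed a new collection of the requested kind:
    // a blocking gen2 request must see the blocking full GC count advance
    // (waiting out any in-progress BGC first), and in all cases the condemned
    // generation's collection count must move past the value recorded at entry.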
retry:

    CurrentCollectionCount = GarbageCollectTry(generation, low_memory_p, mode);

    if ((mode & collection_blocking) &&
        (generation == max_generation) &&
        (gc_heap::full_gc_counts[gc_type_blocking] == BlockingCollectionCountAtEntry))
    {
#ifdef BACKGROUND_GC
        if (recursive_gc_sync::background_running_p())
        {
            pGenGCHeap->background_gc_wait();
        }
#endif //BACKGROUND_GC

        goto retry;
    }

    if (CollectionCountAtEntry == CurrentCollectionCount)
    {
        goto retry;
    }

    return S_OK;
}

size_t
GCHeap::GarbageCollectTry (int generation, BOOL low_memory_p, int mode)
{
    int gen = (generation < 0) ?
               max_generation : min (generation, max_generation);

    gc_reason reason = reason_empty;

    if (low_memory_p)
    {
        if (mode & collection_blocking)
        {
            reason = reason_lowmemory_blocking;
        }
        else
        {
            reason = reason_lowmemory;
        }
    }
    else
    {
        reason = reason_induced;
    }

    if (reason == reason_induced)
    {
        if (mode & collection_compacting)
        {
            reason = reason_induced_compacting;
        }
        else if (mode & collection_non_blocking)
        {
            reason = reason_induced_noforce;
        }
#ifdef STRESS_HEAP
        else if (mode & collection_gcstress)
        {
            reason = reason_gcstress;
        }
#endif
    }

    return GarbageCollectGeneration (gen, reason);
}

void gc_heap::do_pre_gc()
{
    STRESS_LOG_GC_STACK;

#ifdef STRESS_LOG
    STRESS_LOG_GC_START(VolatileLoad(&settings.gc_index),
                        (uint32_t)settings.condemned_generation,
                        (uint32_t)settings.reason);
#endif // STRESS_LOG

#ifdef MULTIPLE_HEAPS
    gc_heap* hp = g_heaps[0];
#else
    gc_heap* hp = 0;
#endif //MULTIPLE_HEAPS

#ifdef BACKGROUND_GC
    settings.b_state = hp->current_bgc_state;
#endif //BACKGROUND_GC

#ifdef TRACE_GC
    size_t total_allocated_since_last_gc = get_total_allocated_since_last_gc();
#ifdef BACKGROUND_GC
    dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)(%s)(%d)",
        VolatileLoad(&settings.gc_index),
        dd_collection_count (hp->dynamic_data_of (0)),
        settings.condemned_generation,
        total_allocated_since_last_gc,
        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
        settings.b_state));
#else
    dprintf (1, ("*GC* %d(gen0:%d)(%d)(alloc: %Id)",
        VolatileLoad(&settings.gc_index),
        dd_collection_count(hp->dynamic_data_of(0)),
        settings.condemned_generation,
        total_allocated_since_last_gc));
#endif //BACKGROUND_GC

    if (heap_hard_limit)
    {
        size_t total_heap_committed = get_total_committed_size();
        size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping;
        dprintf (1, ("(%d)GC commit BEG #%Id: %Id (recorded: %Id)",
            settings.condemned_generation,
            (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded));
    }
#endif //TRACE_GC

    // TODO: this can happen because of the way we are calling
    // do_pre_gc; will fix later.
    //if (last_gc_index > VolatileLoad(&settings.gc_index))
    //{
    //    FATAL_GC_ERROR();
    //}

    last_gc_index = VolatileLoad(&settings.gc_index);
    GCHeap::UpdatePreGCCounters();
#if defined(__linux__)
    GCToEEInterface::UpdateGCEventStatus(static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Default)),
                                         static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Default)),
                                         static_cast<int>(GCEventStatus::GetEnabledLevel(GCEventProvider_Private)),
                                         static_cast<int>(GCEventStatus::GetEnabledKeywords(GCEventProvider_Private)));
#endif // __linux__

    if (settings.concurrent)
    {
#ifdef BACKGROUND_GC
        full_gc_counts[gc_type_background]++;
#if defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
        GCHeap::gc_stress_fgcs_in_bgc = 0;
#endif // STRESS_HEAP && !FEATURE_REDHAWK
#endif // BACKGROUND_GC
    }
    else
    {
        if (settings.condemned_generation == max_generation)
        {
            full_gc_counts[gc_type_blocking]++;
        }
        else
        {
#ifdef BACKGROUND_GC
            if (settings.background_p)
            {
                ephemeral_fgc_counts[settings.condemned_generation]++;
            }
#endif //BACKGROUND_GC
        }
    }
}

#ifdef GC_CONFIG_DRIVEN
void gc_heap::record_interesting_info_per_heap()
{
    // datapoints are always from the last blocking GC so don't record again
    // for BGCs.
    if (!(settings.concurrent))
    {
        for (int i = 0; i < max_idp_count; i++)
        {
            interesting_data_per_heap[i] += interesting_data_per_gc[i];
        }
    }

    int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
    if (compact_reason >= 0)
        (compact_reasons_per_heap[compact_reason])++;
    int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
    if (expand_mechanism >= 0)
        (expand_mechanisms_per_heap[expand_mechanism])++;

    for (int i = 0; i < max_gc_mechanism_bits_count; i++)
    {
        if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i))
            (interesting_mechanism_bits_per_heap[i])++;
    }

    //         h#  | GC  | gen | C   | EX  | NF  | BF  | ML  | DM  || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
    cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |",
            heap_number,
            (size_t)settings.gc_index,
            settings.condemned_generation,
            // TEMP - only doing this for WKS GC to observe the pattern of compacting vs sweeping GCs.
            (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
            ((expand_mechanism >= 0)? "X" : ""), // EX
            ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
            ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF
            (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML
            (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM
            interesting_data_per_gc[idp_pre_short],
            interesting_data_per_gc[idp_post_short],
            interesting_data_per_gc[idp_merged_pin],
            interesting_data_per_gc[idp_converted_pin],
            interesting_data_per_gc[idp_pre_pin],
            interesting_data_per_gc[idp_post_pin],
            interesting_data_per_gc[idp_pre_and_post_pin],
            interesting_data_per_gc[idp_pre_short_padded],
            interesting_data_per_gc[idp_post_short_padded]));
}

void gc_heap::record_global_mechanisms()
{
    for (int i = 0; i < max_global_mechanisms_count; i++)
    {
        if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i))
        {
            ::record_global_mechanism (i);
        }
    }
}

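// When compact_ratio is configured, steer the observed mix of compacting vs
// sweeping GCs toward that target percentage: once a few GCs have been recorded,
// override the heap's own compact/sweep decision whenever the running ratio has
// drifted past the target.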
BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p)
{
    if (!compact_ratio)
        return (!compact_p);

    size_t compact_count = compact_or_sweep_gcs[0];
    size_t sweep_count = compact_or_sweep_gcs[1];

    size_t total_count = compact_count + sweep_count;
    BOOL should_compact = compact_p;
    if (total_count > 3)
    {
        if (compact_p)
        {
            int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1));
            if (temp_ratio > compact_ratio)
            {
                // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n",
                //     (compact_count + 1), (total_count + 1), temp_ratio));
                should_compact = FALSE;
            }
        }
        else
        {
            int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1));
            if (temp_ratio > (100 - compact_ratio))
            {
                // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n",
                //     (sweep_count + 1), (total_count + 1), temp_ratio));
                should_compact = TRUE;
            }
        }
    }

    return !should_compact;
}
#endif //GC_CONFIG_DRIVEN

#ifdef BGC_SERVO_TUNING
// virtual_fl_size is only used for NGC2
void gc_heap::check_and_adjust_bgc_tuning (int gen_number, size_t physical_size, ptrdiff_t virtual_fl_size)
{
    // For LOH we need to check more often to catch things like when the size grows too much.
    int min_gen_to_check = ((gen_number == max_generation) ? (max_generation - 1) : 0);

    if (settings.condemned_generation >= min_gen_to_check)
    {
#ifdef MULTIPLE_HEAPS
        gc_heap* hp = g_heaps[0];
#else
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

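        // gen_flr is the generation's free list size expressed as a percentage of
        // its physical size; the gen1/gen2 collection counts are captured for the
        // tuning log messages below.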
        size_t total_gen_size = physical_size;
        size_t total_generation_fl_size = get_total_generation_fl_size (gen_number);
        double gen_flr = (double)total_generation_fl_size * 100.0 / (double)total_gen_size;
        size_t gen1_index = dd_collection_count (hp->dynamic_data_of (max_generation - 1));
        size_t gen2_index = dd_collection_count (hp->dynamic_data_of (max_generation));

        bgc_tuning::tuning_calculation* current_gen_calc = &bgc_tuning::gen_calc[gen_number - max_generation];
        bgc_tuning::tuning_stats* current_gen_stats = &bgc_tuning::gen_stats[gen_number - max_generation];

        bool gen_size_inc_p = (total_gen_size > current_gen_calc->last_bgc_size);

        if ((settings.condemned_generation >= min_gen_to_check) &&
            (settings.condemned_generation != max_generation))
        {
            if (gen_size_inc_p)
            {
                current_gen_stats->last_gen_increase_flr = gen_flr;
                dprintf (BGC_TUNING_LOG, ("BTLp[g1: %Id, g2: %Id]: gen%d size inc %s %Id->%Id, flr: %.3f",
                        gen1_index, gen2_index, gen_number,
                        (recursive_gc_sync::background_running_p() ? "during bgc" : ""),
                        current_gen_stats->last_bgc_physical_size, total_gen_size, gen_flr));
            }

            if (!bgc_tuning::fl_tuning_triggered)
            {
                if (bgc_tuning::enable_fl_tuning)
                {
                    if (!((recursive_gc_sync::background_running_p() || (hp->current_bgc_state == bgc_initialized))))
                    {
                        assert (settings.entry_memory_load);

                        // We start when we are 2/3 way there so we don't overshoot.
                        if ((settings.entry_memory_load >= (bgc_tuning::memory_load_goal * 2 / 3)) &&
                            (full_gc_counts[gc_type_background] >= 2))
                        {
                            bgc_tuning::next_bgc_p = true;
                            current_gen_calc->first_alloc_to_trigger = get_total_servo_alloc (gen_number);
                            dprintf (BGC_TUNING_LOG, ("BTL[g1: %Id] mem high enough: %d(goal: %d), gen%d fl alloc: %Id, trigger BGC!",
                                gen1_index, settings.entry_memory_load, bgc_tuning::memory_load_goal,
                                gen_number, current_gen_calc->first_alloc_to_trigger));
                        }
                    }
                }
            }
        }

        if ((settings.condemned_generation == max_generation) && !(settings.concurrent))
        {
            size_t total_survived = get_total_surv_size (gen_number);
            size_t total_begin = get_total_begin_data_size (gen_number);
            double current_gc_surv_rate = (double)total_survived * 100.0 / (double)total_begin;

            // calculate the adjusted gen_flr.
            double total_virtual_size = (double)physical_size + (double)virtual_fl_size;
            double total_fl_size = (double)total_generation_fl_size + (double)virtual_fl_size;
            double new_gen_flr = total_fl_size * 100.0 / total_virtual_size;

            dprintf (BGC_TUNING_LOG, ("BTL%d NGC2 size %Id->%Id, fl %Id(%.3f)->%Id(%.3f)",
                gen_number, physical_size, (size_t)total_virtual_size,
                total_generation_fl_size, gen_flr,
                (size_t)total_fl_size, new_gen_flr));

            dprintf (BGC_TUNING_LOG, ("BTL%d* %Id, %.3f, %.3f, %.3f, %.3f, %.3f, %Id, %Id, %Id, %Id",
                                    gen_number,
                                    (size_t)total_virtual_size,
                                    0.0,
                                    0.0,
                                    new_gen_flr,
                                    current_gen_stats->last_gen_increase_flr,
                                    current_gc_surv_rate,
                                    0,
                                    0,
                                    0,
                                    current_gen_calc->alloc_to_trigger));

            bgc_tuning::gen1_index_last_bgc_end = gen1_index;

            current_gen_calc->last_bgc_size = total_gen_size;
            current_gen_calc->last_bgc_flr = new_gen_flr;
            current_gen_calc->last_sweep_above_p = false;
            current_gen_calc->last_bgc_end_alloc = 0;

            current_gen_stats->last_alloc_end_to_start = 0;
            current_gen_stats->last_alloc_start_to_sweep = 0;
            current_gen_stats->last_alloc_sweep_to_end = 0;
            current_gen_stats->last_bgc_fl_size = total_generation_fl_size;
            current_gen_stats->last_bgc_surv_rate = current_gc_surv_rate;
            current_gen_stats->last_gen_increase_flr = 0;
        }
    }
}

void gc_heap::get_and_reset_loh_alloc_info()
{
    if (!bgc_tuning::enable_fl_tuning)
        return;

    total_loh_a_last_bgc = 0;

    uint64_t total_loh_a_no_bgc = 0;
    uint64_t total_loh_a_bgc_marking = 0;
    uint64_t total_loh_a_bgc_planning = 0;
#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
        total_loh_a_no_bgc += hp->loh_a_no_bgc;
        hp->loh_a_no_bgc = 0;
        total_loh_a_bgc_marking += hp->loh_a_bgc_marking;
        hp->loh_a_bgc_marking = 0;
        total_loh_a_bgc_planning += hp->loh_a_bgc_planning;
        hp->loh_a_bgc_planning = 0;
    }
    dprintf (2, ("LOH alloc: outside bgc: %I64d; bm: %I64d; bp: %I64d",
        total_loh_a_no_bgc,
        total_loh_a_bgc_marking,
        total_loh_a_bgc_planning));

    total_loh_a_last_bgc = total_loh_a_no_bgc + total_loh_a_bgc_marking + total_loh_a_bgc_planning;
}
#endif //BGC_SERVO_TUNING

bool gc_heap::is_pm_ratio_exceeded()
{
    size_t maxgen_frag = 0;
    size_t maxgen_size = 0;
    size_t total_heap_size = get_total_heap_size();

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
#else //MULTIPLE_HEAPS
    {
        gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        maxgen_frag += dd_fragmentation (hp->dynamic_data_of (max_generation));
        maxgen_size += hp->generation_size (max_generation);
    }

    double maxgen_ratio = (double)maxgen_size / (double)total_heap_size;
    double maxgen_frag_ratio = (double)maxgen_frag / (double)maxgen_size;
    dprintf (GTC_LOG, ("maxgen %Id(%d%% total heap), frag: %Id (%d%% maxgen)",
        maxgen_size, (int)(maxgen_ratio * 100.0),
        maxgen_frag, (int)(maxgen_frag_ratio * 100.0)));

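    // The ratio is considered exceeded when gen2 makes up more than half the heap
    // and more than 10% of gen2 is fragmentation.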
    bool maxgen_highfrag_p = ((maxgen_ratio > 0.5) && (maxgen_frag_ratio > 0.1));

    // Adjust elevation here: if gen2 is this fragmented, a full GC would not be
    // unproductive, so don't lock elevation.
    if (maxgen_highfrag_p)
    {
        settings.should_lock_elevation = FALSE;
        dprintf (GTC_LOG, ("high frag gen2, turn off elevation"));
    }

    return maxgen_highfrag_p;
}

void gc_heap::do_post_gc()
{
    if (!settings.concurrent)
    {
        initGCShadow();
    }

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    AllocStart = GetCycleCount32();
#else
    AllocStart = clock();
#endif //COUNT_CYCLES
#endif //TRACE_GC

#ifdef MULTIPLE_HEAPS
    gc_heap* hp = g_heaps[0];
#else
    gc_heap* hp = 0;
#endif //MULTIPLE_HEAPS

    GCToEEInterface::GcDone(settings.condemned_generation);

    GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index),
                         (uint32_t)settings.condemned_generation,
                         (uint32_t)settings.reason,
                         !!settings.concurrent);

    uint32_t current_memory_load = 0;

#ifdef BGC_SERVO_TUNING
    if (bgc_tuning::enable_fl_tuning)
    {
        uint64_t current_available_physical = 0;
        size_t gen2_physical_size = 0;
        size_t gen3_physical_size = 0;
        ptrdiff_t gen2_virtual_fl_size = 0;
        ptrdiff_t gen3_virtual_fl_size = 0;
        ptrdiff_t vfl_from_kp = 0;
        ptrdiff_t vfl_from_ki = 0;

        gen2_physical_size = get_total_generation_size (max_generation);
        gen3_physical_size = get_total_generation_size (max_generation + 1);

        get_memory_info (&current_memory_load, &current_available_physical);
        if ((settings.condemned_generation == max_generation) && !settings.concurrent)
        {
            double gen2_size_ratio = (double)gen2_physical_size / ((double)gen2_physical_size + (double)gen3_physical_size);

            double total_virtual_fl_size = bgc_tuning::calculate_ml_tuning (current_available_physical, true, &vfl_from_kp, &vfl_from_ki);
            gen2_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * gen2_size_ratio);
            gen3_virtual_fl_size = (ptrdiff_t)(total_virtual_fl_size * (1.0 - gen2_size_ratio));

#ifdef SIMPLE_DPRINTF
            dprintf (BGC_TUNING_LOG, ("BTL: ml: %d (g: %d)(%s), a: %I64d (g: %I64d, elg: %Id+%Id=%Id, %Id+%Id=%Id), vfl: %Id=%Id+%Id(NGC2)",
                current_memory_load, bgc_tuning::memory_load_goal,
                ((current_available_physical > bgc_tuning::available_memory_goal) ? "above" : "below"),
                current_available_physical, bgc_tuning::available_memory_goal,
                gen2_physical_size, gen2_virtual_fl_size, (gen2_physical_size + gen2_virtual_fl_size),
                gen3_physical_size, gen3_virtual_fl_size, (gen3_physical_size + gen3_virtual_fl_size),
                (ptrdiff_t)total_virtual_fl_size, vfl_from_kp, vfl_from_ki));
#endif //SIMPLE_DPRINTF
        }

        check_and_adjust_bgc_tuning (max_generation, gen2_physical_size, gen2_virtual_fl_size);
        check_and_adjust_bgc_tuning ((max_generation + 1), gen3_physical_size, gen3_virtual_fl_size);
    }
#endif //BGC_SERVO_TUNING

#ifdef SIMPLE_DPRINTF
    dprintf (1, ("*EGC* %Id(gen0:%Id)(%Id)(%d)(%s)(%s)(%s)(ml: %d->%d)",
        VolatileLoad(&settings.gc_index),
        dd_collection_count(hp->dynamic_data_of(0)),
        GetHighPrecisionTimeStamp(),
        settings.condemned_generation,
        (settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
        (settings.compaction ? "C" : "S"),
        (settings.promotion ? "P" : "S"),
        settings.entry_memory_load,
        current_memory_load));
#endif //SIMPLE_DPRINTF

    if (settings.exit_memory_load != 0)
        last_gc_memory_load = settings.exit_memory_load;
    else if (settings.entry_memory_load != 0)
        last_gc_memory_load = settings.entry_memory_load;

    last_gc_heap_size = get_total_heap_size();
    last_gc_fragmentation = get_total_fragmentation();

#ifdef TRACE_GC
    if (heap_hard_limit)
    {
        size_t total_heap_committed = get_total_committed_size();
        size_t total_heap_committed_recorded = current_total_committed - current_total_committed_bookkeeping;
        dprintf (1, ("(%d)GC commit END #%Id: %Id (recorded: %Id), heap %Id, frag: %Id",
            settings.condemned_generation,
            (size_t)settings.gc_index, total_heap_committed, total_heap_committed_recorded,
            last_gc_heap_size, last_gc_fragmentation));
    }
#endif //TRACE_GC

    // Note we only do this at the end of full blocking GCs because we do not want
    // to turn on this provisional mode during the middle of a BGC.
    if ((settings.condemned_generation == max_generation) && (!settings.concurrent))
    {
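        // Under PM stress, randomly toggle provisional mode after a small random
        // number of full compacting GCs; otherwise drive it from the memory load
        // and gen2 fragmentation measured below.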
        if (pm_stress_on)
        {
            size_t full_compacting_gc_count = full_gc_counts[gc_type_compacting];
            if (provisional_mode_triggered)
            {
                uint64_t r = gc_rand::get_rand(10);
                if ((full_compacting_gc_count - provisional_triggered_gc_count) >= r)
                {
                    provisional_mode_triggered = false;
                    provisional_off_gc_count = full_compacting_gc_count;
                    dprintf (GTC_LOG, ("%Id NGC2s when turned on, %Id NGCs since(%Id)",
                        provisional_triggered_gc_count, (full_compacting_gc_count - provisional_triggered_gc_count),
                        num_provisional_triggered));
                }
            }
            else
            {
                uint64_t r = gc_rand::get_rand(5);
                if ((full_compacting_gc_count - provisional_off_gc_count) >= r)
                {
                    provisional_mode_triggered = true;
                    provisional_triggered_gc_count = full_compacting_gc_count;
                    num_provisional_triggered++;
                    dprintf (GTC_LOG, ("%Id NGC2s when turned off, %Id NGCs since(%Id)",
                        provisional_off_gc_count, (full_compacting_gc_count - provisional_off_gc_count),
                        num_provisional_triggered));
                }
            }
        }
        else
        {
            if (provisional_mode_triggered)
            {
                if ((settings.entry_memory_load < high_memory_load_th) ||
                    !is_pm_ratio_exceeded())
                {
                    dprintf (GTC_LOG, ("turning off PM"));
                    provisional_mode_triggered = false;
                }
            }
            else if ((settings.entry_memory_load >= high_memory_load_th) && is_pm_ratio_exceeded())
            {
                dprintf (GTC_LOG, ("highmem && highfrag - turning on PM"));
                provisional_mode_triggered = true;
                num_provisional_triggered++;
            }
        }
    }

    GCHeap::UpdatePostGCCounters();
#ifdef STRESS_LOG
    STRESS_LOG_GC_END(VolatileLoad(&settings.gc_index),
                      (uint32_t)settings.condemned_generation,
                      (uint32_t)settings.reason);
#endif // STRESS_LOG

#ifdef GC_CONFIG_DRIVEN
    if (!settings.concurrent)
    {
        if (settings.compaction)
            (compact_or_sweep_gcs[0])++;
        else
            (compact_or_sweep_gcs[1])++;
    }

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < n_heaps; i++)
        g_heaps[i]->record_interesting_info_per_heap();
#else
    record_interesting_info_per_heap();
#endif //MULTIPLE_HEAPS
    record_global_mechanisms();
#endif //GC_CONFIG_DRIVEN
}

unsigned GCHeap::GetGcCount()
{
    return (unsigned int)VolatileLoad(&pGenGCHeap->settings.gc_index);
}

size_t
GCHeap::GarbageCollectGeneration (unsigned int gen, gc_reason reason)
{
    dprintf (2, ("triggered a GC!"));

#ifdef MULTIPLE_HEAPS
    gc_heap* hpt = gc_heap::g_heaps[0];
#else
    gc_heap* hpt = 0;
#endif //MULTIPLE_HEAPS
    bool cooperative_mode = true;
    dynamic_data* dd = hpt->dynamic_data_of (gen);
    size_t localCount = dd_collection_count (dd);

    enter_spin_lock (&gc_heap::gc_lock);
    dprintf (SPINLOCK_LOG, ("GC Egc"));
    ASSERT_HOLDING_SPIN_LOCK(&gc_heap::gc_lock);

    //don't trigger another GC if one was already in progress
    //while waiting for the lock
    {
        size_t col_count = dd_collection_count (dd);

        if (localCount != col_count)
        {
#ifdef SYNCHRONIZATION_STATS
            gc_lock_contended++;
#endif //SYNCHRONIZATION_STATS
            dprintf (SPINLOCK_LOG, ("no need GC Lgc"));
            leave_spin_lock (&gc_heap::gc_lock);

            // We don't need to release msl here 'cause this means a GC
            // has happened and would have released all msl's.
            return col_count;
         }
    }

#ifdef COUNT_CYCLES
    int gc_start = GetCycleCount32();
#endif //COUNT_CYCLES

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    AllocDuration += GetCycleCount32() - AllocStart;
#else
    AllocDuration += clock() - AllocStart;
#endif //COUNT_CYCLES
#endif //TRACE_GC

    gc_heap::g_low_memory_status = (reason == reason_lowmemory) ||
                                   (reason == reason_lowmemory_blocking) ||
                                   (gc_heap::latency_level == latency_level_memory_footprint);

    gc_trigger_reason = reason;

#ifdef MULTIPLE_HEAPS
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap::g_heaps[i]->reset_gc_done();
    }
#else
    gc_heap::reset_gc_done();
#endif //MULTIPLE_HEAPS

    gc_heap::gc_started = TRUE;

    {
        init_sync_log_stats();

#ifndef MULTIPLE_HEAPS
        cooperative_mode = gc_heap::enable_preemptive ();

        dprintf (2, ("Suspending EE"));
        BEGIN_TIMING(suspend_ee_during_log);
        GCToEEInterface::SuspendEE(SUSPEND_FOR_GC);
        END_TIMING(suspend_ee_during_log);
        gc_heap::proceed_with_gc_p = gc_heap::should_proceed_with_gc();
        gc_heap::disable_preemptive (cooperative_mode);
        if (gc_heap::proceed_with_gc_p)
            pGenGCHeap->settings.init_mechanisms();
        else
            gc_heap::update_collection_counts_for_no_gc();

#endif //!MULTIPLE_HEAPS
    }

// MAP_EVENT_MONITORS(EE_MONITOR_GARBAGE_COLLECTIONS, NotifyEvent(EE_EVENT_TYPE_GC_STARTED, 0));

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    unsigned start;
    unsigned finish;
    start = GetCycleCount32();
#else
    clock_t start;
    clock_t finish;
    start = clock();
#endif //COUNT_CYCLES
    PromotedObjectCount = 0;
#endif //TRACE_GC

    unsigned int condemned_generation_number = gen;

    // We want the stack from the user thread that triggered the GC rather than
    // from the GC thread (which is what we'd get for Server GC), so fire the
    // event here; Workstation GC does the same to be uniform.
    FIRE_EVENT(GCTriggered, static_cast<uint32_t>(reason));

#ifdef MULTIPLE_HEAPS
    GcCondemnedGeneration = condemned_generation_number;

    cooperative_mode = gc_heap::enable_preemptive ();

    BEGIN_TIMING(gc_during_log);
    gc_heap::ee_suspend_event.Set();
    gc_heap::wait_for_gc_done();
    END_TIMING(gc_during_log);

    gc_heap::disable_preemptive (cooperative_mode);

    condemned_generation_number = GcCondemnedGeneration;
#else
    if (gc_heap::proceed_with_gc_p)
    {
        BEGIN_TIMING(gc_during_log);
        pGenGCHeap->garbage_collect (condemned_generation_number);
        if (gc_heap::pm_trigger_full_gc)
        {
            pGenGCHeap->garbage_collect_pm_full_gc();
        }
        END_TIMING(gc_during_log);
    }
#endif //MULTIPLE_HEAPS

#ifdef TRACE_GC
#ifdef COUNT_CYCLES
    finish = GetCycleCount32();
#else
    finish = clock();
#endif //COUNT_CYCLES
    GcDuration += finish - start;
    dprintf (3,
             ("<GC# %d> Condemned: %d, Duration: %d, total: %d Alloc Avg: %d, Small Objects:%d Large Objects:%d",
              VolatileLoad(&pGenGCHeap->settings.gc_index), condemned_generation_number,
              finish - start, GcDuration,
              AllocCount ? (AllocDuration / AllocCount) : 0,
              AllocSmallCount, AllocBigCount));
    AllocCount = 0;
    AllocDuration = 0;
#endif // TRACE_GC

#ifdef BACKGROUND_GC
    // We are deciding whether we should fire the alloc wait end event here
    // because in begin_foreground we could be calling end_foreground
    // if we need to retry.
    if (gc_heap::alloc_wait_event_p)
    {
        hpt->fire_alloc_wait_event_end (awr_fgc_wait_for_bgc);
        gc_heap::alloc_wait_event_p = FALSE;
    }
#endif //BACKGROUND_GC

#ifndef MULTIPLE_HEAPS
#ifdef BACKGROUND_GC
    if (!gc_heap::dont_restart_ee_p)
    {
#endif //BACKGROUND_GC
        BEGIN_TIMING(restart_ee_during_log);
        GCToEEInterface::RestartEE(TRUE);
        END_TIMING(restart_ee_during_log);
#ifdef BACKGROUND_GC
    }
#endif //BACKGROUND_GC
#endif //!MULTIPLE_HEAPS

#ifdef COUNT_CYCLES
    printf ("GC: %d Time: %d\n", GcCondemnedGeneration,
            GetCycleCount32() - gc_start);
#endif //COUNT_CYCLES

#ifndef MULTIPLE_HEAPS
    process_sync_log_stats();
    gc_heap::gc_started = FALSE;
    gc_heap::set_gc_done();
    dprintf (SPINLOCK_LOG, ("GC Lgc"));
    leave_spin_lock (&gc_heap::gc_lock);
#endif //!MULTIPLE_HEAPS

#ifdef FEATURE_PREMORTEM_FINALIZATION
    GCToEEInterface::EnableFinalization(!pGenGCHeap->settings.concurrent && pGenGCHeap->settings.found_finalizers);
#endif // FEATURE_PREMORTEM_FINALIZATION

    return dd_collection_count (dd);
}

size_t      GCHeap::GetTotalBytesInUse ()
{
#ifdef MULTIPLE_HEAPS
    //enumerate all the heaps and get their size.
    size_t tot_size = 0;
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap;
        tot_size += Hp->ApproxTotalBytesInUse (FALSE);
    }
    return tot_size;
#else
    return ApproxTotalBytesInUse ();
#endif //MULTIPLE_HEAPS
}

// Get the total allocated bytes
uint64_t GCHeap::GetTotalAllocatedBytes()
{
#ifdef MULTIPLE_HEAPS
    uint64_t total_alloc_bytes = 0;
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* hp = gc_heap::g_heaps[i];
        total_alloc_bytes += hp->total_alloc_bytes_soh;
        total_alloc_bytes += hp->total_alloc_bytes_loh;
    }
    return total_alloc_bytes;
#else
    return (pGenGCHeap->total_alloc_bytes_soh +  pGenGCHeap->total_alloc_bytes_loh);
#endif //MULTIPLE_HEAPS
}

int GCHeap::CollectionCount (int generation, int get_bgc_fgc_count)
{
    if (get_bgc_fgc_count != 0)
    {
#ifdef BACKGROUND_GC
        if (generation == max_generation)
        {
            return (int)(gc_heap::full_gc_counts[gc_type_background]);
        }
        else
        {
            return (int)(gc_heap::ephemeral_fgc_counts[generation]);
        }
#else
        return 0;
#endif //BACKGROUND_GC
    }

#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps [0];
#else  //MULTIPLE_HEAPS
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS
    if (generation > max_generation)
        return 0;
    else
        return (int)dd_collection_count (hp->dynamic_data_of (generation));
}

size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
{
    size_t totsize = 0;
    //GCTODO
    //ASSERT(InMustComplete());
    enter_spin_lock (&pGenGCHeap->gc_lock);

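    // Sum the in-use portion of the ephemeral segment plus every older SOH segment,
    // subtract each SOH generation's free list and free object space, and (unless
    // small_heap_only) account for the LOH the same way.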
    heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
    // Get small block heap size info
    totsize = (pGenGCHeap->alloc_allocated - heap_segment_mem (eph_seg));
    heap_segment* seg1 = generation_start_segment (pGenGCHeap->generation_of (max_generation));
    while (seg1 != eph_seg)
    {
        totsize += heap_segment_allocated (seg1) -
            heap_segment_mem (seg1);
        seg1 = heap_segment_next (seg1);
    }

    //discount the fragmentation
    for (int i = 0; i <= max_generation; i++)
    {
        generation* gen = pGenGCHeap->generation_of (i);
        totsize -= (generation_free_list_space (gen) + generation_free_obj_space (gen));
    }

    if (!small_heap_only)
    {
        heap_segment* seg2 = generation_start_segment (pGenGCHeap->generation_of (max_generation+1));

        while (seg2 != 0)
        {
            totsize += heap_segment_allocated (seg2) -
                heap_segment_mem (seg2);
            seg2 = heap_segment_next (seg2);
        }

        //discount the fragmentation
        generation* loh_gen = pGenGCHeap->generation_of (max_generation+1);
        size_t frag = generation_free_list_space (loh_gen) + generation_free_obj_space (loh_gen);
        totsize -= frag;
    }
    leave_spin_lock (&pGenGCHeap->gc_lock);
    return totsize;
}

#ifdef MULTIPLE_HEAPS
void GCHeap::AssignHeap (alloc_context* acontext)
{
    // Assign heap based on processor
    acontext->set_alloc_heap(GetHeap(heap_select::select_heap(acontext)));
    acontext->set_home_heap(acontext->get_alloc_heap());
}

GCHeap* GCHeap::GetHeap (int n)
{
    assert (n < gc_heap::n_heaps);
    return gc_heap::g_heaps[n]->vm_heap;
}
#endif //MULTIPLE_HEAPS

bool GCHeap::IsThreadUsingAllocationContextHeap(gc_alloc_context* context, int thread_number)
{
    alloc_context* acontext = static_cast<alloc_context*>(context);
#ifdef MULTIPLE_HEAPS
    return ((acontext->get_home_heap() == GetHeap(thread_number)) ||
            ((acontext->get_home_heap() == 0) && (thread_number == 0)));
#else
    UNREFERENCED_PARAMETER(acontext);
    UNREFERENCED_PARAMETER(thread_number);
    return true;
#endif //MULTIPLE_HEAPS
}

// Returns the number of GC heaps.
int GCHeap::GetNumberOfHeaps ()
{
#ifdef MULTIPLE_HEAPS
    return gc_heap::n_heaps;
#else
    return 1;
#endif //MULTIPLE_HEAPS
}

/*
  This spends extra time cycling through all the heaps while creating the handle;
  it ought to be changed to keep alloc_context.home_heap as a number (equal to heap_number).
*/
int GCHeap::GetHomeHeapNumber ()
{
#ifdef MULTIPLE_HEAPS
    gc_alloc_context* ctx = GCToEEInterface::GetAllocContext();
    if (!ctx)
    {
        return 0;
    }

    GCHeap *hp = static_cast<alloc_context*>(ctx)->get_home_heap();
    return (hp ? hp->pGenGCHeap->heap_number : 0);
#else
    return 0;
#endif //MULTIPLE_HEAPS
}

unsigned int GCHeap::GetCondemnedGeneration()
{
    return gc_heap::settings.condemned_generation;
}

void GCHeap::GetMemoryInfo(uint64_t* highMemLoadThresholdBytes,
                           uint64_t* totalAvailableMemoryBytes,
                           uint64_t* lastRecordedMemLoadBytes,
                           uint32_t* lastRecordedMemLoadPct,
                           size_t* lastRecordedHeapSizeBytes,
                           size_t* lastRecordedFragmentationBytes)
{
    *highMemLoadThresholdBytes = (uint64_t) (((double)gc_heap::high_memory_load_th) / 100 * gc_heap::total_physical_mem);
    *totalAvailableMemoryBytes = gc_heap::heap_hard_limit != 0 ? gc_heap::heap_hard_limit : gc_heap::total_physical_mem;
    *lastRecordedMemLoadBytes = (uint64_t) (((double)gc_heap::last_gc_memory_load) / 100 * gc_heap::total_physical_mem);
    *lastRecordedMemLoadPct = gc_heap::last_gc_memory_load;
    *lastRecordedHeapSizeBytes = gc_heap::last_gc_heap_size;
    *lastRecordedFragmentationBytes = gc_heap::last_gc_fragmentation;
}

int GCHeap::GetGcLatencyMode()
{
    return (int)(pGenGCHeap->settings.pause_mode);
}

int GCHeap::SetGcLatencyMode (int newLatencyMode)
{
    if (gc_heap::settings.pause_mode == pause_no_gc)
        return (int)set_pause_mode_no_gc;

    gc_pause_mode new_mode = (gc_pause_mode)newLatencyMode;

    if (new_mode == pause_low_latency)
    {
#ifndef MULTIPLE_HEAPS
        pGenGCHeap->settings.pause_mode = new_mode;
#endif //!MULTIPLE_HEAPS
    }
    else if (new_mode == pause_sustained_low_latency)
    {
#ifdef BACKGROUND_GC
        if (gc_heap::gc_can_use_concurrent)
        {
            pGenGCHeap->settings.pause_mode = new_mode;
        }
#endif //BACKGROUND_GC
    }
    else
    {
        pGenGCHeap->settings.pause_mode = new_mode;
    }

#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        // If we get here, it means we are doing an FGC. If the pause
        // mode was altered we will need to save it in the BGC settings.
        if (gc_heap::saved_bgc_settings.pause_mode != new_mode)
        {
            gc_heap::saved_bgc_settings.pause_mode = new_mode;
        }
    }
#endif //BACKGROUND_GC

    return (int)set_pause_mode_success;
}

int GCHeap::GetLOHCompactionMode()
{
    return pGenGCHeap->loh_compaction_mode;
}

void GCHeap::SetLOHCompactionMode (int newLOHCompactionMode)
{
#ifdef FEATURE_LOH_COMPACTION
    pGenGCHeap->loh_compaction_mode = (gc_loh_compaction_mode)newLOHCompactionMode;
#endif //FEATURE_LOH_COMPACTION
}

bool GCHeap::RegisterForFullGCNotification(uint32_t gen2Percentage,
                                           uint32_t lohPercentage)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->fgn_last_alloc = dd_new_allocation (hp->dynamic_data_of (0));
        hp->fgn_maxgen_percent = gen2Percentage;
    }
#else //MULTIPLE_HEAPS
    pGenGCHeap->fgn_last_alloc = dd_new_allocation (pGenGCHeap->dynamic_data_of (0));
    pGenGCHeap->fgn_maxgen_percent = gen2Percentage;
#endif //MULTIPLE_HEAPS

    pGenGCHeap->full_gc_approach_event.Reset();
    pGenGCHeap->full_gc_end_event.Reset();
    pGenGCHeap->full_gc_approach_event_set = false;

    pGenGCHeap->fgn_loh_percent = lohPercentage;

    return TRUE;
}

bool GCHeap::CancelFullGCNotification()
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->fgn_maxgen_percent = 0;
    }
#else //MULTIPLE_HEAPS
    pGenGCHeap->fgn_maxgen_percent = 0;
#endif //MULTIPLE_HEAPS

    pGenGCHeap->fgn_loh_percent = 0;
    pGenGCHeap->full_gc_approach_event.Set();
    pGenGCHeap->full_gc_end_event.Set();

    return TRUE;
}

int GCHeap::WaitForFullGCApproach(int millisecondsTimeout)
{
    dprintf (2, ("WFGA: Begin wait"));
    int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_approach_event), millisecondsTimeout);
    dprintf (2, ("WFGA: End wait"));
    return result;
}

int GCHeap::WaitForFullGCComplete(int millisecondsTimeout)
{
    dprintf (2, ("WFGE: Begin wait"));
    int result = gc_heap::full_gc_wait (&(pGenGCHeap->full_gc_end_event), millisecondsTimeout);
    dprintf (2, ("WFGE: End wait"));
    return result;
}

int GCHeap::StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC)
{
    NoGCRegionLockHolder lh;

    dprintf (1, ("begin no gc called"));
    start_no_gc_region_status status = gc_heap::prepare_for_no_gc_region (totalSize, lohSizeKnown, lohSize, disallowFullBlockingGC);
    if (status == start_no_gc_success)
    {
        GarbageCollect (max_generation);
        status = gc_heap::get_start_no_gc_region_status();
    }

    if (status != start_no_gc_success)
        gc_heap::handle_failure_for_no_gc();

    return (int)status;
}

int GCHeap::EndNoGCRegion()
{
    NoGCRegionLockHolder lh;
    return (int)gc_heap::end_no_gc_region();
}

void GCHeap::PublishObject (uint8_t* Obj)
{
#ifdef BACKGROUND_GC
    gc_heap* hp = gc_heap::heap_of (Obj);
    hp->bgc_alloc_lock->loh_alloc_done (Obj);
    hp->bgc_untrack_loh_alloc();
#endif //BACKGROUND_GC
}

// The spec for this one isn't clear. This function
// returns the size that can be allocated without
// triggering a GC of any kind.
size_t GCHeap::ApproxFreeBytes()
{
    //GCTODO
    //ASSERT(InMustComplete());
    enter_spin_lock (&pGenGCHeap->gc_lock);

    generation* gen = pGenGCHeap->generation_of (0);
    size_t res = generation_allocation_limit (gen) - generation_allocation_pointer (gen);

    leave_spin_lock (&pGenGCHeap->gc_lock);

    return res;
}

HRESULT GCHeap::GetGcCounters(int gen, gc_counters* counters)
{
    if ((gen < 0) || (gen > max_generation))
        return E_FAIL;
#ifdef MULTIPLE_HEAPS
    counters->current_size = 0;
    counters->promoted_size = 0;
    counters->collection_count = 0;

    //enumerate all the heaps and get their counters.
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        dynamic_data* dd = gc_heap::g_heaps [i]->dynamic_data_of (gen);

        counters->current_size += dd_current_size (dd);
        counters->promoted_size += dd_promoted_size (dd);
        if (i == 0)
            counters->collection_count += dd_collection_count (dd);
    }
#else
    dynamic_data* dd = pGenGCHeap->dynamic_data_of (gen);
    counters->current_size = dd_current_size (dd);
    counters->promoted_size = dd_promoted_size (dd);
    counters->collection_count = dd_collection_count (dd);
#endif //MULTIPLE_HEAPS
    return S_OK;
}

// Get the segment size to use, making sure it conforms.
size_t GCHeap::GetValidSegmentSize(bool large_seg)
{
    return (large_seg ? gc_heap::min_loh_segment_size : gc_heap::soh_segment_size);
}

// Get the gen0 min size, making sure it conforms.
size_t gc_heap::get_gen0_min_size()
{
    size_t gen0size = static_cast<size_t>(GCConfig::GetGen0Size());
    bool is_config_invalid = ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size));
    if (is_config_invalid)
    {
#ifdef SERVER_GC
        // performance data seems to indicate halving the size results
        // in optimal perf.  Ask for adjusted gen0 size.
        gen0size = max(GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),(256*1024));

        // if gen0 size is too large given the available memory, reduce it.
        // Get true cache size, as we don't want to reduce below this.
        size_t trueSize = max(GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE),(256*1024));
        dprintf (1, ("cache: %Id-%Id",
            GCToOSInterface::GetCacheSizePerLogicalCpu(FALSE),
            GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE)));

        int n_heaps = gc_heap::n_heaps;
#else //SERVER_GC
        size_t trueSize = GCToOSInterface::GetCacheSizePerLogicalCpu(TRUE);
        gen0size = max((4*trueSize/5),(256*1024));
        trueSize = max(trueSize, (256*1024));
        int n_heaps = 1;
#endif //SERVER_GC

        dprintf (1, ("gen0size: %Id * %d = %Id, physical mem: %Id / 6 = %Id",
                gen0size, n_heaps, (gen0size * n_heaps),
                gc_heap::total_physical_mem,
                gc_heap::total_physical_mem / 6));

        // if the total gen0 min size across heaps would exceed 1/6th of physical memory,
        // then reduce it until it either fits or has been reduced to the cache size.
        while ((gen0size * n_heaps) > (gc_heap::total_physical_mem / 6))
        {
            gen0size = gen0size / 2;
            if (gen0size <= trueSize)
            {
                gen0size = trueSize;
                break;
            }
        }
    }

    size_t seg_size = gc_heap::soh_segment_size;
    assert (seg_size);

    // Generation 0 must never be more than 1/2 the segment size.
    if (gen0size >= (seg_size / 2))
        gen0size = seg_size / 2;

    // If the value from config is valid we use it as is without this adjustment.
    if (is_config_invalid)
    {
        if (heap_hard_limit)
        {
            size_t gen0size_seg = seg_size / 8;
            if (gen0size >= gen0size_seg)
            {
                dprintf (1, ("gen0 limited by seg size %Id->%Id", gen0size, gen0size_seg));
                gen0size = gen0size_seg;
            }
        }

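        // Scale the computed size down to 5/8 as the gen0 budget.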
        gen0size = gen0size / 8 * 5;
    }

    gen0size = Align (gen0size);

    return gen0size;
}

void GCHeap::SetReservedVMLimit (size_t vmlimit)
{
    gc_heap::reserved_memory_limit = vmlimit;
}

//versions of same method on each heap

#ifdef FEATURE_PREMORTEM_FINALIZATION

Object* GCHeap::GetNextFinalizableObject()
{

#ifdef MULTIPLE_HEAPS

    //return the first non critical one found in any heap's queue.
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        Object* O = hp->finalize_queue->GetNextFinalizableObject(TRUE);
        if (O)
            return O;
    }
    //then return the first one, critical or not, found in any heap's queue.
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        Object* O = hp->finalize_queue->GetNextFinalizableObject(FALSE);
        if (O)
            return O;
    }
    return 0;


#else //MULTIPLE_HEAPS
    return pGenGCHeap->finalize_queue->GetNextFinalizableObject();
#endif //MULTIPLE_HEAPS

}

size_t GCHeap::GetNumberFinalizableObjects()
{
#ifdef MULTIPLE_HEAPS
    size_t cnt = 0;
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        cnt += hp->finalize_queue->GetNumberFinalizableObjects();
    }
    return cnt;


#else //MULTIPLE_HEAPS
    return pGenGCHeap->finalize_queue->GetNumberFinalizableObjects();
#endif //MULTIPLE_HEAPS
}

size_t GCHeap::GetFinalizablePromotedCount()
{
#ifdef MULTIPLE_HEAPS
    size_t cnt = 0;

    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        cnt += hp->finalize_queue->GetPromotedCount();
    }
    return cnt;

#else //MULTIPLE_HEAPS
    return pGenGCHeap->finalize_queue->GetPromotedCount();
#endif //MULTIPLE_HEAPS
}

bool GCHeap::ShouldRestartFinalizerWatchDog()
{
    // This condition was historically used as part of the condition to detect finalizer thread timeouts
    return gc_heap::gc_lock.lock != -1;
}

void GCHeap::SetFinalizeQueueForShutdown(bool fHasLock)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->finalize_queue->SetSegForShutDown(fHasLock);
    }

#else //MULTIPLE_HEAPS
    pGenGCHeap->finalize_queue->SetSegForShutDown(fHasLock);
#endif //MULTIPLE_HEAPS
}

//---------------------------------------------------------------------------
// Finalized class tracking
//---------------------------------------------------------------------------

bool GCHeap::RegisterForFinalization (int gen, Object* obj)
{
    if (gen == -1)
        gen = 0;
    if (((((CObjectHeader*)obj)->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN))
    {
        //just reset the bit
        ((CObjectHeader*)obj)->GetHeader()->ClrBit(BIT_SBLK_FINALIZER_RUN);
        return true;
    }
    else
    {
        gc_heap* hp = gc_heap::heap_of ((uint8_t*)obj);
        return hp->finalize_queue->RegisterForFinalization (gen, obj);
    }
}

void GCHeap::SetFinalizationRun (Object* obj)
{
    ((CObjectHeader*)obj)->GetHeader()->SetBit(BIT_SBLK_FINALIZER_RUN);
}


//--------------------------------------------------------------------
//
//          Support for finalization
//
//--------------------------------------------------------------------

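// The finalize queue is a single array (m_Array) partitioned into adjacent
// segments by m_FillPointers: one segment per generation (gen_segment maps
// older generations to lower indices), followed by the critical finalizer
// list, the finalizer (f-reachable) list, and finally the free space.
// SegQueue (i) is the start of segment i and SegQueueLimit (i) is its end;
// adjacent segments share a boundary, so moving an object between segments
// only requires shifting the boundaries in between (see MoveItem).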
inline
unsigned int gen_segment (int gen)
{
    assert (((signed)NUMBERGENERATIONS - gen - 1)>=0);
    return (NUMBERGENERATIONS - gen - 1);
}

bool CFinalize::Initialize()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    m_Array = new (nothrow)(Object*[100]);

    if (!m_Array)
    {
        ASSERT (m_Array);
        STRESS_LOG_OOM_STACK(sizeof(Object*[100]));
        if (GCConfig::GetBreakOnOOM())
        {
            GCToOSInterface::DebugBreak();
        }
        return false;
    }
    m_EndArray = &m_Array[100];

    for (int i =0; i < FreeList; i++)
    {
        SegQueueLimit (i) = m_Array;
    }
    m_PromotedCount = 0;
    lock = -1;
#ifdef _DEBUG
    lockowner_threadid.Clear();
#endif // _DEBUG

    return true;
}

CFinalize::~CFinalize()
{
    delete [] m_Array;
}

size_t CFinalize::GetPromotedCount ()
{
    return m_PromotedCount;
}

inline
void CFinalize::EnterFinalizeLock()
{
    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
             GCToEEInterface::GetThread() == 0 ||
             GCToEEInterface::IsPreemptiveGCDisabled());

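    // The finalize lock: -1 means free, >= 0 means held. Try to take it with a
    // compare-exchange; on contention, spin (yielding the thread on most
    // iterations and sleeping briefly every 8th) until it is released, then retry.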
retry:
    if (Interlocked::CompareExchange(&lock, 0, -1) >= 0)
    {
        unsigned int i = 0;
        while (lock >= 0)
        {
            YieldProcessor();           // indicate to the processor that we are spinning
            if (++i & 7)
                GCToOSInterface::YieldThread (0);
            else
                GCToOSInterface::Sleep (5);
        }
        goto retry;
    }

#ifdef _DEBUG
    lockowner_threadid.SetToCurrentThread();
#endif // _DEBUG
}

inline
void CFinalize::LeaveFinalizeLock()
{
    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
             GCToEEInterface::GetThread() == 0 ||
             GCToEEInterface::IsPreemptiveGCDisabled());

#ifdef _DEBUG
    lockowner_threadid.Clear();
#endif // _DEBUG
    lock = -1;
}

bool
CFinalize::RegisterForFinalization (int gen, Object* obj, size_t size)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    EnterFinalizeLock();
    // Adjust gen
    unsigned int dest = 0;

    if (g_fFinalizerRunOnShutDown)
    {
        //no method table available yet,
        //put it in the finalizer queue and sort out when
        //dequeueing
        dest = FinalizerListSeg;
    }
    else
        dest = gen_segment (gen);

    // Adjust boundary for segments so that GC will keep objects alive.
    Object*** s_i = &SegQueue (FreeList);
    if ((*s_i) == m_EndArray)
    {
        if (!GrowArray())
        {
            LeaveFinalizeLock();
            if (method_table(obj) == NULL)
            {
                // If the object is uninitialized, a valid size should have been passed.
                assert (size >= Align (min_obj_size));
                dprintf (3, ("Making unused array [%Ix, %Ix[", (size_t)obj, (size_t)(obj+size)));
                ((CObjectHeader*)obj)->SetFree(size);
            }
            STRESS_LOG_OOM_STACK(0);
            if (GCConfig::GetBreakOnOOM())
            {
                GCToOSInterface::DebugBreak();
            }
            return false;
        }
    }
    Object*** end_si = &SegQueueLimit (dest);
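    // Open a slot at the end of the destination segment: walking down from the
    // FreeList boundary, move each intervening segment's first element into the
    // slot just past its end (vacated by the segment above) and bump that
    // segment's limit; the new object then goes into the freed slot at dest's limit.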
    do
    {
        //is the segment empty?
        if (!(*s_i == *(s_i-1)))
        {
            //no, swap the end elements.
            *(*s_i) = *(*(s_i-1));
        }
        //increment the fill pointer
        (*s_i)++;
        //go to the next segment.
        s_i--;
    } while (s_i > end_si);

    // We have reached the destination segment
    // store the object
    **s_i = obj;
    // increment the fill pointer
    (*s_i)++;

    LeaveFinalizeLock();

    return true;
}

Object*
CFinalize::GetNextFinalizableObject (BOOL only_non_critical)
{
    Object* obj = 0;
    //serialize
    EnterFinalizeLock();

retry:
    if (!IsSegEmpty(FinalizerListSeg))
    {
        if (g_fFinalizerRunOnShutDown)
        {
            obj = *(SegQueueLimit (FinalizerListSeg)-1);
            if (method_table(obj)->HasCriticalFinalizer())
            {
                MoveItem ((SegQueueLimit (FinalizerListSeg)-1),
                          FinalizerListSeg, CriticalFinalizerListSeg);
                goto retry;
            }
            else
                --SegQueueLimit (FinalizerListSeg);
        }
        else
            obj =  *(--SegQueueLimit (FinalizerListSeg));

    }
    else if (!only_non_critical && !IsSegEmpty(CriticalFinalizerListSeg))
    {
        //the FinalizerList is empty, so we can adjust both
        //limits instead of moving the object to the free list
        obj =  *(--SegQueueLimit (CriticalFinalizerListSeg));
        --SegQueueLimit (FinalizerListSeg);
    }
    if (obj)
    {
        dprintf (3, ("running finalizer for %Ix (mt: %Ix)", obj, method_table (obj)));
    }
    LeaveFinalizeLock();
    return obj;
}

void
CFinalize::SetSegForShutDown(BOOL fHasLock)
{
    int i;

    if (!fHasLock)
        EnterFinalizeLock();
    for (i = 0; i <= max_generation; i++)
    {
        unsigned int seg = gen_segment (i);
        Object** startIndex = SegQueueLimit (seg)-1;
        Object** stopIndex  = SegQueue (seg);
        for (Object** po = startIndex; po >= stopIndex; po--)
        {
            Object* obj = *po;
            if (method_table(obj)->HasCriticalFinalizer())
            {
                MoveItem (po, seg, CriticalFinalizerListSeg);
            }
            else
            {
                MoveItem (po, seg, FinalizerListSeg);
            }
        }
    }
    if (!fHasLock)
        LeaveFinalizeLock();
}

void
CFinalize::DiscardNonCriticalObjects()
{
    //empty the finalization queue
    Object** startIndex = SegQueueLimit (FinalizerListSeg)-1;
    Object** stopIndex  = SegQueue (FinalizerListSeg);
    for (Object** po = startIndex; po >= stopIndex; po--)
    {
        MoveItem (po, FinalizerListSeg, FreeList);
    }
}

size_t
CFinalize::GetNumberFinalizableObjects()
{
    return SegQueueLimit (FinalizerListSeg) -
        (g_fFinalizerRunOnShutDown ? m_Array : SegQueue(FinalizerListSeg));
}

void
CFinalize::MoveItem (Object** fromIndex,
                     unsigned int fromSeg,
                     unsigned int toSeg)
{

    int step;
    ASSERT (fromSeg != toSeg);
    if (fromSeg > toSeg)
        step = -1;
    else
        step = +1;
    // Place the element at the boundary closest to dest
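    // At each boundary between fromSeg and toSeg, swap the moving element into the
    // slot adjacent to the boundary and then shift the boundary past it, so the
    // element crosses one segment per iteration while the other items stay in
    // their segments.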
    Object** srcIndex = fromIndex;
    for (unsigned int i = fromSeg; i != toSeg; i+= step)
    {
        Object**& destFill = m_FillPointers[i+(step - 1 )/2];
        Object** destIndex = destFill - (step + 1)/2;
        if (srcIndex != destIndex)
        {
            Object* tmp = *srcIndex;
            *srcIndex = *destIndex;
            *destIndex = tmp;
        }
        destFill -= step;
        srcIndex = destIndex;
    }
}

void
CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC)
{
    ScanContext sc;
    if (pSC == 0)
        pSC = &sc;

    pSC->thread_number = hn;

    //scan the finalization queue
    Object** startIndex  = SegQueue (CriticalFinalizerListSeg);
    Object** stopIndex  = SegQueueLimit (FinalizerListSeg);

    for (Object** po = startIndex; po < stopIndex; po++)
    {
        Object* o = *po;
        //dprintf (3, ("scan freacheable %Ix", (size_t)o));
        dprintf (3, ("scan f %Ix", (size_t)o));

        (*fn)(po, pSC, 0);
    }
}

void CFinalize::WalkFReachableObjects (fq_walk_fn fn)
{
    Object** startIndex = SegQueue (CriticalFinalizerListSeg);
    Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg);
    Object** stopIndex  = SegQueueLimit (FinalizerListSeg);
    for (Object** po = startIndex; po < stopIndex; po++)
    {
        //report *po
        fn(po < stopCriticalIndex, *po);
    }
}

BOOL
CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p,
                                gc_heap* hp)
{
    ScanContext sc;
    sc.promotion = TRUE;
#ifdef MULTIPLE_HEAPS
    sc.thread_number = hp->heap_number;
#else
    UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS

    BOOL finalizedFound = FALSE;

    //start with gen and explore all the younger generations.
    unsigned int startSeg = gen_segment (gen);
    {
        m_PromotedCount = 0;
        for (unsigned int Seg = startSeg; Seg <= gen_segment(0); Seg++)
        {
            Object** endIndex = SegQueue (Seg);
            for (Object** i = SegQueueLimit (Seg)-1; i >= endIndex ;i--)
            {
                CObjectHeader* obj = (CObjectHeader*)*i;
                dprintf (3, ("scanning: %Ix", (size_t)obj));
                if (!g_theGCHeap->IsPromoted (obj))
                {
                    dprintf (3, ("freacheable: %Ix", (size_t)obj));

                    assert (method_table(obj)->HasFinalizer());

                    if (GCToEEInterface::EagerFinalized(obj))
                    {
                        MoveItem (i, Seg, FreeList);
                    }
                    else if ((obj->GetHeader()->GetBits()) & BIT_SBLK_FINALIZER_RUN)
                    {
                        //remove the object because we don't want to
                        //run the finalizer

                        MoveItem (i, Seg, FreeList);

                        //Reset the bit so it will be put back on the queue
                        //if resurrected and re-registered.
                        obj->GetHeader()->ClrBit (BIT_SBLK_FINALIZER_RUN);

                    }
                    else
                    {
                        m_PromotedCount++;

                        if (method_table(obj)->HasCriticalFinalizer())
                        {
                            MoveItem (i, Seg, CriticalFinalizerListSeg);
                        }
                        else
                        {
                            MoveItem (i, Seg, FinalizerListSeg);
                        }
                    }
                }
#ifdef BACKGROUND_GC
                else
                {
                    if ((gen == max_generation) && (recursive_gc_sync::background_running_p()))
                    {
                        // TODO - fix the following line.
                        //assert (gc_heap::background_object_marked ((uint8_t*)obj, FALSE));
                        dprintf (3, ("%Ix is marked", (size_t)obj));
                    }
                }
#endif //BACKGROUND_GC
            }
        }
    }
    finalizedFound = !IsSegEmpty(FinalizerListSeg) ||
                     !IsSegEmpty(CriticalFinalizerListSeg);

    if (finalizedFound)
    {
        //Promote the f-reachable objects
        GcScanRoots (pfn,
#ifdef MULTIPLE_HEAPS
                     hp->heap_number
#else
                     0
#endif //MULTIPLE_HEAPS
                     , 0);

        hp->settings.found_finalizers = TRUE;

#ifdef BACKGROUND_GC
        if (hp->settings.concurrent)
        {
            hp->settings.found_finalizers = !(IsSegEmpty(FinalizerListSeg) && IsSegEmpty(CriticalFinalizerListSeg));
        }
#endif //BACKGROUND_GC
        if (hp->settings.concurrent && hp->settings.found_finalizers)
        {
            if (!mark_only_p)
                GCToEEInterface::EnableFinalization(true);
        }
    }

    return finalizedFound;
}

//Relocates all of the objects in the finalization array
void
CFinalize::RelocateFinalizationData (int gen, gc_heap* hp)
{
    ScanContext sc;
    sc.promotion = FALSE;
#ifdef MULTIPLE_HEAPS
    sc.thread_number = hp->heap_number;
#else
    UNREFERENCED_PARAMETER(hp);
#endif //MULTIPLE_HEAPS

    unsigned int Seg = gen_segment (gen);

    Object** startIndex = SegQueue (Seg);
    for (Object** po = startIndex; po < SegQueue (FreeList);po++)
    {
        GCHeap::Relocate (po, &sc);
    }
}

void
CFinalize::UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p)
{
    // Update the generation fill pointers.
    // If gen_0_empty_p is FALSE, test each object to find out whether
    // it was promoted or demoted and move it to the matching segment.
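    //
    // For example, after a gen 1 GC that left gen 0 empty, entries recorded under
    // gen 1 now belong to gen 2 and entries recorded under gen 0 now belong to
    // gen 1, so each generation's fill pointer simply takes over the old value of
    // the next younger generation's fill pointer.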
    if (gen_0_empty_p)
    {
        for (int i = min (gen+1, max_generation); i > 0; i--)
        {
            m_FillPointers [gen_segment(i)] = m_FillPointers [gen_segment(i-1)];
        }
    }
    else
    {
        //Look for promoted or demoted objects and move them to the right segment

        for (int i = gen; i >= 0; i--)
        {
            unsigned int Seg = gen_segment (i);
            Object** startIndex = SegQueue (Seg);

            for (Object** po = startIndex;
                 po < SegQueueLimit (gen_segment(i)); po++)
            {
                int new_gen = g_theGCHeap->WhichGeneration (*po);
                if (new_gen != i)
                {
                    if (new_gen > i)
                    {
                        //promotion
                        MoveItem (po, gen_segment (i), gen_segment (new_gen));
                    }
                    else
                    {
                        //demotion
                        MoveItem (po, gen_segment (i), gen_segment (new_gen));
                        //step back so the entry swapped into this slot is also examined.
                        po--;
                    }
                }

            }
        }
    }
}

BOOL
CFinalize::GrowArray()
{
    size_t oldArraySize = (m_EndArray - m_Array);
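    // Grow by roughly 20%: the new capacity is 12/10 of the old one.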
    size_t newArraySize =  (size_t)(((float)oldArraySize / 10) * 12);

    Object** newArray = new (nothrow) Object*[newArraySize];
    if (!newArray)
    {
        // It's not safe to throw here, because of the FinalizeLock.  Tell our caller
        // to throw for us.
//        ASSERT (newArray);
        return FALSE;
    }
    memcpy (newArray, m_Array, oldArraySize*sizeof(Object*));

    //adjust the fill pointers
    for (int i = 0; i < FreeList; i++)
    {
        m_FillPointers [i] += (newArray - m_Array);
    }
    delete[] m_Array;   // m_Array was allocated with new[], so release it with delete[]
    m_Array = newArray;
    m_EndArray = &m_Array [newArraySize];

    return TRUE;
}

#ifdef VERIFY_HEAP
void CFinalize::CheckFinalizerObjects()
{
    for (int i = 0; i <= max_generation; i++)
    {
        Object **startIndex = SegQueue (gen_segment (i));
        Object **stopIndex  = SegQueueLimit (gen_segment (i));

        for (Object **po = startIndex; po < stopIndex; po++)
        {
            if ((int)g_theGCHeap->WhichGeneration (*po) < i)
                FATAL_GC_ERROR ();
            ((CObjectHeader*)*po)->Validate();
        }
    }
}
#endif //VERIFY_HEAP

#endif // FEATURE_PREMORTEM_FINALIZATION


//------------------------------------------------------------------------------
//
//                      End of VM specific support
//
//------------------------------------------------------------------------------
void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
{
    generation* gen = gc_heap::generation_of (gen_number);
    heap_segment*    seg = generation_start_segment (gen);
    uint8_t*       x = ((gen_number == max_generation) ? heap_segment_mem (seg) :
                     generation_allocation_start (gen));

    uint8_t*       end = heap_segment_allocated (seg);
    BOOL small_object_segments = TRUE;
    int align_const = get_alignment_constant (small_object_segments);

    while (1)
    {
        if (x >= end)
        {
            if ((seg = heap_segment_next (seg)) != 0)
            {
                x = heap_segment_mem (seg);
                end = heap_segment_allocated (seg);
                continue;
            }
            else
            {
                if (small_object_segments && walk_large_object_heap_p)
                {
                    small_object_segments = FALSE;
                    align_const = get_alignment_constant (small_object_segments);
                    seg = generation_start_segment (large_object_generation);
                    x = heap_segment_mem (seg);
                    end = heap_segment_allocated (seg);
                    continue;
                }
                else
                {
                    break;
                }
            }
        }

        size_t s = size (x);
        CObjectHeader* o = (CObjectHeader*)x;

        if (!o->IsFree())
        {
            _ASSERTE(((size_t)o & 0x3) == 0); // Last two bits should never be set at this point

            if (!fn (o->GetObjectBase(), context))
                return;
        }
        x = x + Align (s, align_const);
    }
}

void gc_heap::walk_finalize_queue (fq_walk_fn fn)
{
#ifdef FEATURE_PREMORTEM_FINALIZATION
    finalize_queue->WalkFReachableObjects (fn);
#endif //FEATURE_PREMORTEM_FINALIZATION
}

void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];

        hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p);
    }
#else
    walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p);
#endif //MULTIPLE_HEAPS
}

void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context)
{
    uint8_t* o = (uint8_t*)obj;
    if (o)
    {
        go_through_object_cl (method_table (o), o, size(o), oo,
                                    {
                                        if (*oo)
                                        {
                                            Object *oh = (Object*)*oo;
                                            if (!fn (oh, context))
                                                return;
                                        }
                                    }
            );
    }
}

void GCHeap::DiagWalkObject2 (Object* obj, walk_fn2 fn, void* context)
{
    uint8_t* o = (uint8_t*)obj;
    if (o)
    {
        go_through_object_cl (method_table (o), o, size(o), oo,
                                    {
                                        if (*oo)
                                        {
                                            if (!fn (obj, oo, context))
                                                return;
                                        }
                                    }
            );
    }
}

void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type)
{
    gc_heap* hp = (gc_heap*)gc_context;
    hp->walk_survivors (fn, diag_context, type);
}

void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p)
{
    gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p);
}
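
#if 0 // Illustrative sketch only; excluded from compilation.
// A minimal example of wiring a diagnostic callback through DiagWalkHeap above.
// CountObjectCallback and CountAllObjects are hypothetical names used purely for
// illustration; the walk visits every non-free object and stops early if the
// callback returns false (see walk_heap_per_heap).
static bool CountObjectCallback (Object* obj, void* context)
{
    UNREFERENCED_PARAMETER(obj);
    (*(size_t*)context)++;      // count each live object visited
    return true;                // keep walking; returning false stops the walk
}

static size_t CountAllObjects (GCHeap* pHeap)
{
    size_t count = 0;
    // Walk all generations including the large object heap. The heap is assumed
    // to be in a walkable state, as it is for the diagnostic walks in this file.
    pHeap->DiagWalkHeap (CountObjectCallback, &count, max_generation, true);
    return count;
}
#endif // illustrative sketch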

void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn)
{
    gc_heap* hp = (gc_heap*)gc_context;
    hp->walk_finalize_queue (fn);
}

void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc)
{
#ifdef MULTIPLE_HEAPS
    for (int hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        gc_heap* hp = gc_heap::g_heaps [hn];
        hp->finalize_queue->GcScanRoots(fn, hn, sc);
    }
#else
    pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc);
#endif //MULTIPLE_HEAPS
}

void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
{
    UNREFERENCED_PARAMETER(gen_number);
    GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn);
}

void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context)
{
    UNREFERENCED_PARAMETER(gen_number);
    GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn);
}

// Go through and touch (read) each page straddled by a memory block.
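// A caller can use this, for example as TouchPages (pStart, cb), to pre-fault a
// freshly committed block so that later accesses do not take first-access page
// faults; the volatile loads below force each straddled page to be read.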
void TouchPages(void * pStart, size_t cb)
{
    const size_t pagesize = OS_PAGE_SIZE;
    _ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
    if (cb)
    {
        VOLATILE(char)* pEnd = (VOLATILE(char)*)(cb + (char*)pStart);
        VOLATILE(char)* p = (VOLATILE(char)*)(((char*)pStart) -  (((size_t)pStart) & (pagesize-1)));
        while (p < pEnd)
        {
            char a;
            a = VolatileLoad(p);
            //printf("Touching page %lxh\n", (uint32_t)p);
            p += pagesize;
        }
    }
}

#if defined(WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
    // This code is designed to catch failures to update the GC heap through the write
    // barrier.  The way it works is to copy the whole heap right after every GC.  The write
    // barrier code has been modified so that it updates the shadow as well as the
    // real GC heap.  Before doing the next GC, we walk the heap, looking for pointers
    // that were updated in the real heap but not in the shadow.  A mismatch indicates
    // an error.  The offending code can be found by breaking after the correct GC,
    // and then placing a data breakpoint on the heap location that was updated without
    // going through the write barrier.
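
    // The sketch below (excluded from compilation) only illustrates the shadow
    // bookkeeping described above, using the same g_GCShadow / g_GCShadowEnd /
    // g_gc_lowest_address globals that testGCShadow() uses; it is not the
    // runtime's actual write barrier code.
#if 0
inline void updateGCShadow_sketch (Object** ptr, Object* newValue)
{
    *ptr = newValue;                     // the real store into the GC heap
    // The shadow slot sits at the same offset from g_GCShadow as the real slot
    // does from g_gc_lowest_address; testGCShadow() locates it the same way.
    Object** shadow = (Object**) &g_GCShadow[((uint8_t*)ptr - g_gc_lowest_address)];
    if ((uint8_t*)shadow < g_GCShadowEnd)    // the shadow may not cover newly added segments
        *shadow = newValue;
}
#endif // illustrative sketch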

    // Called at process shutdown
void deleteGCShadow()
{
    if (g_GCShadow != 0)
        GCToOSInterface::VirtualRelease (g_GCShadow, g_GCShadowEnd - g_GCShadow);
    g_GCShadow = 0;
    g_GCShadowEnd = 0;
}

    // Called at startup and right after a GC; takes a snapshot of the GC heap.
void initGCShadow()
{
    if (!(GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_BARRIERCHECK))
        return;

    size_t len = g_gc_highest_address - g_gc_lowest_address;
    if (len > (size_t)(g_GCShadowEnd - g_GCShadow))
    {
        deleteGCShadow();
        g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None);
        if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len))
        {
            _ASSERTE(!"Not enough memory to run HeapVerify level 2");
            // If after the assert we decide to allow the program to continue
            // running, we need to be in a state that will not trigger any
            // additional AVs while we fail to allocate a shadow segment, i.e.
            // ensure that calls to updateGCShadow() and checkGCWriteBarrier() don't AV.
            deleteGCShadow();
            return;
        }

        g_GCShadowEnd += len;
    }

    // save the value of g_gc_lowest_address at this time.  If this value changes before
    // the next call to checkGCWriteBarrier() it means we extended the heap (with a
    // large object segment most probably), and the whole shadow segment is inconsistent.
    g_shadow_lowest_address = g_gc_lowest_address;

    //****** Copy the whole GC heap ******
    //
    // NOTE: This is the one situation where heap_segment_rw (generation_start_segment())
    // can produce a NULL result, because initialization has not completed.
    //
    generation* gen = gc_heap::generation_of (max_generation);
    heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

    ptrdiff_t delta = g_GCShadow - g_gc_lowest_address;
    BOOL small_object_segments = TRUE;
    while(1)
    {
        if (!seg)
        {
            if (small_object_segments)
            {
                small_object_segments = FALSE;
                seg = heap_segment_rw (generation_start_segment (gc_heap::generation_of (max_generation+1)));
                continue;
            }
            else
                break;
        }
        // Copy the segment
        uint8_t* start = heap_segment_mem(seg);
        uint8_t* end = heap_segment_allocated (seg);
        memcpy(start + delta, start, end - start);
        seg = heap_segment_next_rw (seg);
    }
}

#define INVALIDGCVALUE (void*)((size_t)0xcccccccd)

    // test to see if 'ptr' was only updated via the write barrier.
inline void testGCShadow(Object** ptr)
{
    Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)];
    if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow)
    {

        // If you get this assertion, someone updated a GC pointer in the heap without
        // using the write barrier.  To find out who, check the value of
        // dd_collection_count (dynamic_data_of (0)) and note the value of 'ptr'.
        // Rerun the app to the point where that previous GC has just occurred, then
        // put a data breakpoint on the value of 'ptr' and check every write to that
        // pointer between the two GCs.  The last one is not using the write barrier.

        // If the memory of interest does not exist at system startup,
        // you need to set the data breakpoint right after the memory gets committed.
        // Set a breakpoint at the end of grow_heap_segment, and put the value of 'ptr'
        // in the memory window.  Run until the memory gets mapped; then you can set
        // your breakpoint.

        // Note: we have identified race conditions when updating the GC shadow.
        // Throughout the runtime, code will update an address in the GC heap and then erect
        // the write barrier, which calls updateGCShadow. With an app that pounds one heap
        // location from multiple threads, you can hit this assert even though everyone
        // involved is using the write barrier properly. Thus, we detect the race and set
        // this location to INVALIDGCVALUE.
        // TODO: the code in jithelp.asm doesn't call updateGCShadow, and hasn't been
        // TODO: fixed to detect the race. We've only seen this race from VolatileWritePtr,
        // TODO: so elect not to fix jithelp.asm at this time. It should be done if we start
        // TODO: hitting erroneous asserts in here.

        if (*shadow != INVALIDGCVALUE)
        {
#ifdef FEATURE_BASICFREEZE
            // Write barriers for stores of references to frozen objects may be optimized away.
            if (!gc_heap::frozen_object_p(*ptr))
#endif // FEATURE_BASICFREEZE
            {
                _ASSERTE(!"Pointer updated without using write barrier");
            }
        }
        /*
        else
        {
             printf("saw a INVALIDGCVALUE. (just to let you know)\n");
        }
        */
    }
}

void testGCShadowHelper (uint8_t* x)
{
    size_t s = size (x);
    if (contain_pointers (x))
    {
        go_through_object_nostart (method_table(x), x, s, oo,
                           { testGCShadow((Object**) oo); });
    }
}

    // Walk the whole heap, looking for pointers that were not updated with the write barrier.
void checkGCWriteBarrier()
{
    // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment
    // and the GC shadow segment did not track that change!
    if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address)
    {
        // No shadow heap to compare against, nothing to check.
        return;
    }

    {
        generation* gen = gc_heap::generation_of (max_generation);
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

        PREFIX_ASSUME(seg != NULL);

        while(seg)
        {
            uint8_t* x = heap_segment_mem(seg);
            while (x < heap_segment_allocated (seg))
            {
                size_t s = size (x);
                testGCShadowHelper (x);
                x = x + Align (s);
            }
            seg = heap_segment_next_rw (seg);
        }
    }

    {
        // go through large object heap
        int alignment = get_alignment_constant(FALSE);
        generation* gen = gc_heap::generation_of (max_generation+1);
        heap_segment* seg = heap_segment_rw (generation_start_segment (gen));

        PREFIX_ASSUME(seg != NULL);

        while(seg)
        {
            uint8_t* x = heap_segment_mem(seg);
            while (x < heap_segment_allocated (seg))
            {
                size_t s = size (x);
                testGCShadowHelper (x);
                x = x + Align (s, alignment);
            }
            seg = heap_segment_next_rw (seg);
        }
    }
}
#endif //WRITE_BARRIER_CHECK && !SERVER_GC

#endif // !DACCESS_COMPILE

#ifdef FEATURE_BASICFREEZE
void gc_heap::walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef)
{
#ifdef DACCESS_COMPILE
    UNREFERENCED_PARAMETER(seg);
    UNREFERENCED_PARAMETER(pvContext);
    UNREFERENCED_PARAMETER(pfnMethodTable);
    UNREFERENCED_PARAMETER(pfnObjRef);
#else
    uint8_t *o = heap_segment_mem(seg);

    // small heap alignment constant
    int alignment = get_alignment_constant(TRUE);

    while (o < heap_segment_allocated(seg))
    {
        pfnMethodTable(pvContext, o);

        if (contain_pointers (o))
        {
            go_through_object_nostart (method_table (o), o, size(o), oo,
                   {
                       if (*oo)
                           pfnObjRef(pvContext, oo);
                   }
            );
        }

        o += Align(size(o), alignment);
    }
#endif //!DACCESS_COMPILE
}
#endif // FEATURE_BASICFREEZE

#ifndef DACCESS_COMPILE
HRESULT GCHeap::WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout)
{
#ifdef BACKGROUND_GC
    if (recursive_gc_sync::background_running_p())
    {
        uint32_t dwRet = pGenGCHeap->background_gc_wait(awr_ignored, millisecondsTimeout);
        if (dwRet == WAIT_OBJECT_0)
            return S_OK;
        else if (dwRet == WAIT_TIMEOUT)
            return HRESULT_FROM_WIN32(ERROR_TIMEOUT);
        else
            return E_FAIL;      // It is not clear what the last error would be if the wait failed,
                                // as there are too many layers in between. The best we can do is to return E_FAIL.
    }
#endif

    return S_OK;
}
#endif // !DACCESS_COMPILE

void GCHeap::TemporaryEnableConcurrentGC()
{
#ifdef BACKGROUND_GC
    gc_heap::temp_disable_concurrent_p = false;
#endif //BACKGROUND_GC
}

void GCHeap::TemporaryDisableConcurrentGC()
{
#ifdef BACKGROUND_GC
    gc_heap::temp_disable_concurrent_p = true;
#endif //BACKGROUND_GC
}

bool GCHeap::IsConcurrentGCEnabled()
{
#ifdef BACKGROUND_GC
    return (gc_heap::gc_can_use_concurrent && !(gc_heap::temp_disable_concurrent_p));
#else
    return false;
#endif //BACKGROUND_GC
}

void GCHeap::SetFinalizeRunOnShutdown(bool value)
{
    g_fFinalizerRunOnShutDown = value;
}

void PopulateDacVars(GcDacVars *gcDacVars)
{
#ifndef DACCESS_COMPILE
    assert(gcDacVars != nullptr);
    *gcDacVars = {};
    gcDacVars->major_version_number = 1;
    gcDacVars->minor_version_number = 0;
    gcDacVars->built_with_svr = &g_built_with_svr_gc;
    gcDacVars->build_variant = &g_build_variant;
    gcDacVars->gc_structures_invalid_cnt = const_cast<int32_t*>(&GCScan::m_GcStructuresInvalidCnt);
    gcDacVars->generation_size = sizeof(generation);
    gcDacVars->max_gen = &g_max_generation;
#ifndef MULTIPLE_HEAPS
    gcDacVars->mark_array = &gc_heap::mark_array;
    gcDacVars->ephemeral_heap_segment = reinterpret_cast<dac_heap_segment**>(&gc_heap::ephemeral_heap_segment);
    gcDacVars->current_c_gc_state = const_cast<c_gc_state*>(&gc_heap::current_c_gc_state);
    gcDacVars->saved_sweep_ephemeral_seg = reinterpret_cast<dac_heap_segment**>(&gc_heap::saved_sweep_ephemeral_seg);
    gcDacVars->saved_sweep_ephemeral_start = &gc_heap::saved_sweep_ephemeral_start;
    gcDacVars->background_saved_lowest_address = &gc_heap::background_saved_lowest_address;
    gcDacVars->background_saved_highest_address = &gc_heap::background_saved_highest_address;
    gcDacVars->alloc_allocated = &gc_heap::alloc_allocated;
    gcDacVars->next_sweep_obj = &gc_heap::next_sweep_obj;
    gcDacVars->oom_info = &gc_heap::oom_info;
    gcDacVars->finalize_queue = reinterpret_cast<dac_finalize_queue**>(&gc_heap::finalize_queue);
    gcDacVars->generation_table = reinterpret_cast<dac_generation**>(&gc_heap::generation_table);
#ifdef GC_CONFIG_DRIVEN
    gcDacVars->gc_global_mechanisms = reinterpret_cast<size_t**>(&gc_global_mechanisms);
    gcDacVars->interesting_data_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_data_per_heap);
    gcDacVars->compact_reasons_per_heap = reinterpret_cast<size_t**>(&gc_heap::compact_reasons_per_heap);
    gcDacVars->expand_mechanisms_per_heap = reinterpret_cast<size_t**>(&gc_heap::expand_mechanisms_per_heap);
    gcDacVars->interesting_mechanism_bits_per_heap = reinterpret_cast<size_t**>(&gc_heap::interesting_mechanism_bits_per_heap);
#endif // GC_CONFIG_DRIVEN
#ifdef HEAP_ANALYZE
    gcDacVars->internal_root_array = &gc_heap::internal_root_array;
    gcDacVars->internal_root_array_index = &gc_heap::internal_root_array_index;
    gcDacVars->heap_analyze_success = &gc_heap::heap_analyze_success;
#endif // HEAP_ANALYZE
#else
    gcDacVars->n_heaps = &gc_heap::n_heaps;
    gcDacVars->g_heaps = reinterpret_cast<dac_gc_heap***>(&gc_heap::g_heaps);
#endif // MULTIPLE_HEAPS
#else
    UNREFERENCED_PARAMETER(gcDacVars);
#endif // DACCESS_COMPILE
}