/*
 * NOTE(review): excerpt from the MIDDLE of a gap-map construction routine
 * (Tesseract-style textord code).  The enclosing function's header, several
 * loop/if bodies, and the tail all fall outside this view.  The stray
 * integers at the start of most lines are line numbers from the original
 * file fused into the text by extraction; they, and the original line
 * wrapping, are preserved byte-for-byte below.
 *
 * Flow, as far as the visible code shows:
 *   1. Pass 1 over the rows: find the leftmost/rightmost blob edges
 *      (min_left / max_right) -- presumably also feeding xht_stats,
 *      though that line is not visible here.
 *   2. Quantise the block width into buckets of half the median x-height.
 *   3. Pass 2 over the rows: for each inter-blob gap (plus the leading and
 *      trailing margins) bump a per-bucket counter "map".
 *   4. Scan the map for buckets covered by a gap in more than half the
 *      rows -- candidate vertical white streams.
 */
// Histogram over integer range [0, 128) -- presumably row x-heights,
// given it is consulted as xht_stats.median() below.  TODO confirm.
46 STATS xht_stats (0, 128);
51 row_it.set_to_list (block->
get_rows ());
// --- Pass 1: establish the horizontal extent of the block's text. ---
// mark_cycle_pt()/cycled_list()/forward() is the list-iterator idiom:
// walk each element exactly once from the current position.
60 for (row_it.mark_cycle_pt (); !row_it.cycled_list (); row_it.forward ()) {
// Left edge of the first blob in the row ...
66 start_of_row = blob_it.data ()->bounding_box ().left ();
// ... and right edge of data_relative(-1) -- presumably the last blob
// (one step back from the list head).  TODO confirm.
67 end_of_row = blob_it.data_relative (-1)->bounding_box ().right ();
68 if (min_left > start_of_row)
69 min_left = start_of_row;
70 if (max_right < end_of_row)
71 max_right = end_of_row;
// Fewer than 3 rows, or a degenerate extent, means no usable map: zero
// the bounds.  The accompanying bail-out/else is outside this excerpt.
74 if ((total_rows < 3) || (min_left >= max_right)) {
76 min_left = max_right = 0;
// Bucket width = half the median x-height, rounded to nearest integer.
// NOTE(review): the divisions by bucket_size below are integer divides;
// if the median rounds to 0 or 1 this is a divide-by-zero -- confirm the
// unseen guard above rules that out.
79 bucket_size = (
inT16) floor (xht_stats.median () + 0.5) / 2;
// Index of the last bucket covering the block's width.
80 map_max = (max_right - min_left) / bucket_size;
// Presumably zero-initialises map[0..map_max] (loop body not visible).
82 for (i = 0; i <= map_max; i++)
// --- Pass 2: accumulate gap coverage per bucket, row by row. ---
85 for (row_it.mark_cycle_pt (); !row_it.cycled_list (); row_it.forward ()) {
89 blob_it.mark_cycle_pt ();
91 prev_blob_box = blob_box;
// Leading gap: from the block's left margin to the first blob.
94 gap_width = blob_box.
left () - min_left;
97 max_quantum = (blob_box.
left () - min_left) / bucket_size;
98 if (max_quantum > map_max) max_quantum = map_max;
// Presumably increments map[i] for each covered bucket (body unseen).
99 for (i = 0; i <= max_quantum; i++)
// Interior gaps: between each consecutive pair of blobs in the row.
103 while (!blob_it.cycled_list ()) {
105 gap_width = blob_box.
left () - prev_blob_box.
right ();
// min_quantum assignment wrapped across lines: bucket of the previous
// blob's right edge (left-hand side of the assignment is not visible).
109 (prev_blob_box.
right () - min_left) / bucket_size;
110 max_quantum = (blob_box.
left () - min_left) / bucket_size;
111 if (max_quantum > map_max) max_quantum = map_max;
112 for (i = min_quantum; i <= max_quantum; i++)
115 prev_blob_box = blob_box;
// Trailing gap: from the last blob to the block's right margin.
119 gap_width = max_right - prev_blob_box.
right ();
123 (prev_blob_box.
right () - min_left) / bucket_size;
124 if (min_quantum < 0) min_quantum = 0;
125 for (i = min_quantum; i <= map_max; i++)
// --- Classify buckets: a bucket is a candidate gap when more than half
// the rows have a gap there.  The partially visible neighbour tests
// appear to reject isolated single-bucket hits (cf. the
// gapmap_no_isolated_quanta flag declared elsewhere in this file) --
// TODO confirm against the full condition.
131 for (i = 0; i <= map_max; i++) {
132 if (map[i] > total_rows / 2) {
135 (map[i + 1] <= total_rows / 2)) ||
137 (map[i - 1] <= total_rows / 2)) ||
140 (map[i - 1] <= total_rows / 2) &&
141 (map[i + 1] <= total_rows / 2)))) {
EXTERN bool gapmap_use_ends
BLOBNBOX_LIST * blob_list()
EXTERN bool gapmap_no_isolated_quanta
void * alloc_mem(inT32 count)
TBOX box_next(BLOBNBOX_IT *it)
EXTERN double gapmap_big_gaps