Add some CHECK_FOR_INTERRUPTS() calls to the heap-sort call path.

I broke this in commit 337b6f5ecf, which
among other things arranged for quicksorts to CHECK_FOR_INTERRUPTS()
slightly less frequently.  Sadly, it also arranged for heapsorts to
CHECK_FOR_INTERRUPTS() much less frequently.  Repair.
This commit is contained in:
Robert Haas 2012-03-20 21:00:11 -04:00
parent 64c604898e
commit aefa6d163e
1 changed file with 7 additions and 0 deletions

View File

@@ -1168,6 +1168,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
{
/* new tuple <= top of the heap, so we can discard it */
free_sort_tuple(state, tuple);
CHECK_FOR_INTERRUPTS();
}
else
{
@@ -2431,6 +2432,7 @@ make_bounded_heap(Tuplesortstate *state)
{
/* New tuple would just get thrown out, so skip it */
free_sort_tuple(state, &state->memtuples[i]);
CHECK_FOR_INTERRUPTS();
}
else
{
@@ -2518,6 +2520,8 @@ tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple,
memtuples = state->memtuples;
Assert(state->memtupcount < state->memtupsize);
CHECK_FOR_INTERRUPTS();
/*
 * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
 * using 1-based array indexes, not 0-based.
@@ -2549,6 +2553,9 @@ tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex)
if (--state->memtupcount <= 0)
return;
CHECK_FOR_INTERRUPTS();
n = state->memtupcount;
tuple = &memtuples[n];		/* tuple that must be reinserted */
i = 0;						/* i is where the "hole" is */