// KStars -- stretch.cpp
1/* Stretch
2
3 SPDX-License-Identifier: GPL-2.0-or-later
4*/
5
6#include "stretch.h"
7
8#include <fitsio.h>
9#include <math.h>
10#include <QtConcurrent>
11
12namespace
13{
14
// Returns the median value of the vector, or a default-constructed T (0 for
// numeric types) if the vector is empty.
// The vector is modified in an undefined way (nth_element partially sorts it).
template <typename T>
T median(std::vector<T> &values)
{
    // Guard: values[middle] on an empty vector is undefined behavior.
    if (values.empty())
        return T();
    const int middle = values.size() / 2;
    // nth_element is O(n) on average; we only need the middle element in place.
    std::nth_element(values.begin(), values.begin() + middle, values.end());
    return values[middle];
}
24
// Returns a rough maximum of the buffer by inspecting every sampleBy-th value.
// Returns 0 if no value exceeds 0 (or if size is 0).
template <typename T>
T sampledMax(T const *values, int size, int sampleBy)
{
    T largest = 0;
    for (int index = 0; index < size; index += sampleBy)
    {
        const T candidate = values[index];
        if (candidate > largest)
            largest = candidate;
    }
    return largest;
}
35
// Returns the median of the sampled values (every sampleBy-th element).
// Returns a default-constructed T (0 for numeric types) if fewer than one
// sample is available. The input values are not modified.
template <typename T>
T median(T const *values, int size, int sampleBy)
{
    const int downsampled_size = size / sampleBy;
    // Guard: with size < sampleBy this would otherwise take the median of an
    // empty vector, which is undefined behavior.
    if (downsampled_size <= 0)
        return T();
    std::vector<T> samples(downsampled_size);
    for (int index = 0, i = 0; i < downsampled_size; ++i, index += sampleBy)
        samples[i] = values[index];
    return median(samples);
}
47
// This stretches one channel given the input parameters.
// Based on the spec in section 8.5.6
// https://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html
// Uses multiple threads, blocks until done.
// The extension parameters are not used.
// Sampling is applied to the output (that is, with sampling=2, we compute every other output
// sample both in width and height, so the output would have about 4X fewer pixels).
template <typename T>
void stretchOneChannel(T *input_buffer, QImage *output_image,
                       const StretchParams &stretch_params,
                       int input_range, int image_height, int image_width, int sampling)
{
    // One future per output row; all are joined at the bottom of this function.
    QVector<QFuture<void>> futures;

    // We're outputting uint8, so the max output is 255.
    constexpr int maxOutput = 255;

    // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
    const float maxInput = input_range > 1 ? input_range - 1 : input_range;

    // Stretch levels for the single (grey) channel, on a 0 -> 1.0 scale.
    const float midtones = stretch_params.grey_red.midtones;
    const float highlights = stretch_params.grey_red.highlights;
    const float shadows = stretch_params.grey_red.shadows;

    // Precomputed expressions moved out of the loop.
    // highlights - shadows, protecting for divide-by-0, in a 0->1.0 scale.
    const float hsRangeFactor = highlights == shadows ? 1.0f : 1.0f / (highlights - shadows);
    // Shadow and highlight values translated to the ADU scale.
    // NOTE(review): for integer T these truncate toward zero.
    const T nativeShadows = shadows * maxInput;
    const T nativeHighlights = highlights * maxInput;
    // Constants based on above needed for the stretch calculations
    // (the midtones transfer function refactored into k1/k2 form).
    const float k1 = (midtones - 1) * hsRangeFactor * maxOutput / maxInput;
    const float k2 = ((2 * midtones) - 1) * hsRangeFactor / maxInput;

    // Increment the input index by the sampling, the output index increments by 1.
    for (int j = 0, jout = 0; j < image_height; j += sampling, jout++)
    {
        // Each output row is computed in its own task. Capture by value so the
        // loop variables (j, jout) are stable inside the task.
        futures.append(QtConcurrent::run([ = ]()
        {
            T * inputLine = input_buffer + j * image_width;
            auto * scanLine = output_image->scanLine(jout);

            for (int i = 0, iout = 0; i < image_width; i += sampling, iout++)
            {
                const T input = inputLine[i];
                // Clip: below the shadow level -> 0; at/above the highlight level -> 255.
                if (input < nativeShadows) scanLine[iout] = 0;
                else if (input >= nativeHighlights) scanLine[iout] = maxOutput;
                else
                {
                    // Midtones transfer function applied to the shadow-floored input.
                    const T inputFloored = (input - nativeShadows);
                    scanLine[iout] = (inputFloored * k1) / (inputFloored * k2 - midtones);
                }
            }
        }));
    }
    // Block until every row task has completed.
    for(QFuture<void> future : futures)
        future.waitForFinished();
}
106
// This is like the above 1-channel stretch, but extended for 3 channels.
// This could have been more modular, but the three channels are combined
// into a single qRgb value at the end, so it seems the simplest thing is to
// replicate the code. It is assumed the colors are not interleaved--the red image
// is stored fully, then the green, then the blue.
// Sampling is applied to the output (that is, with sampling=2, we compute every other output
// sample both in width and height, so the output would have about 4X fewer pixels).
template <typename T>
void stretchThreeChannels(T *inputBuffer, QImage *outputImage,
                          const StretchParams &stretchParams,
                          int inputRange, int imageHeight, int imageWidth, int sampling)
{
    // One future per output row; all are joined at the bottom of this function.
    QVector<QFuture<void>> futures;

    // We're outputting uint8, so the max output is 255.
    constexpr int maxOutput = 255;

    // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
    const float maxInput = inputRange > 1 ? inputRange - 1 : inputRange;

    // Per-channel stretch levels, on a 0 -> 1.0 scale.
    const float midtonesR = stretchParams.grey_red.midtones;
    const float highlightsR = stretchParams.grey_red.highlights;
    const float shadowsR = stretchParams.grey_red.shadows;
    const float midtonesG = stretchParams.green.midtones;
    const float highlightsG = stretchParams.green.highlights;
    const float shadowsG = stretchParams.green.shadows;
    const float midtonesB = stretchParams.blue.midtones;
    const float highlightsB = stretchParams.blue.highlights;
    const float shadowsB = stretchParams.blue.shadows;

    // Precomputed expressions moved out of the loop.
    // highlights - shadows, protecting for divide-by-0, in a 0->1.0 scale.
    const float hsRangeFactorR = highlightsR == shadowsR ? 1.0f : 1.0f / (highlightsR - shadowsR);
    const float hsRangeFactorG = highlightsG == shadowsG ? 1.0f : 1.0f / (highlightsG - shadowsG);
    const float hsRangeFactorB = highlightsB == shadowsB ? 1.0f : 1.0f / (highlightsB - shadowsB);
    // Shadow and highlight values translated to the ADU scale.
    // NOTE(review): for integer T these truncate toward zero.
    const T nativeShadowsR = shadowsR * maxInput;
    const T nativeShadowsG = shadowsG * maxInput;
    const T nativeShadowsB = shadowsB * maxInput;
    const T nativeHighlightsR = highlightsR * maxInput;
    const T nativeHighlightsG = highlightsG * maxInput;
    const T nativeHighlightsB = highlightsB * maxInput;
    // Constants based on above needed for the stretch calculations
    // (the midtones transfer function refactored into k1/k2 form).
    const float k1R = (midtonesR - 1) * hsRangeFactorR * maxOutput / maxInput;
    const float k1G = (midtonesG - 1) * hsRangeFactorG * maxOutput / maxInput;
    const float k1B = (midtonesB - 1) * hsRangeFactorB * maxOutput / maxInput;
    const float k2R = ((2 * midtonesR) - 1) * hsRangeFactorR / maxInput;
    const float k2G = ((2 * midtonesG) - 1) * hsRangeFactorG / maxInput;
    const float k2B = ((2 * midtonesB) - 1) * hsRangeFactorB / maxInput;

    // Number of samples per channel plane (channels are stored planar, not interleaved).
    const int size = imageWidth * imageHeight;

    for (int j = 0, jout = 0; j < imageHeight; j += sampling, jout++)
    {
        // Each output row is computed in its own task. Capture by value so the
        // loop variables (j, jout) are stable inside the task.
        futures.append(QtConcurrent::run([ = ]()
        {
            // R, G, B input images are stored one after another.
            T * inputLineR = inputBuffer + j * imageWidth;
            T * inputLineG = inputLineR + size;
            T * inputLineB = inputLineG + size;

            auto * scanLine = reinterpret_cast<QRgb*>(outputImage->scanLine(jout));

            for (int i = 0, iout = 0; i < imageWidth; i += sampling, iout++)
            {
                const T inputR = inputLineR[i];
                const T inputG = inputLineG[i];
                const T inputB = inputLineB[i];

                uint8_t red, green, blue;

                // Clip: below shadows -> 0; at/above highlights -> 255;
                // otherwise apply the midtones transfer function.
                if (inputR < nativeShadowsR) red = 0;
                else if (inputR >= nativeHighlightsR) red = maxOutput;
                else
                {
                    const T inputFloored = (inputR - nativeShadowsR);
                    red = (inputFloored * k1R) / (inputFloored * k2R - midtonesR);
                }

                if (inputG < nativeShadowsG) green = 0;
                else if (inputG >= nativeHighlightsG) green = maxOutput;
                else
                {
                    const T inputFloored = (inputG - nativeShadowsG);
                    green = (inputFloored * k1G) / (inputFloored * k2G - midtonesG);
                }

                if (inputB < nativeShadowsB) blue = 0;
                else if (inputB >= nativeHighlightsB) blue = maxOutput;
                else
                {
                    const T inputFloored = (inputB - nativeShadowsB);
                    blue = (inputFloored * k1B) / (inputFloored * k2B - midtonesB);
                }
                // Combine the three stretched channels into one 32-bit pixel.
                scanLine[iout] = qRgb(red, green, blue);
            }
        }));
    }
    // Block until every row task has completed.
    for(QFuture<void> future : futures)
        future.waitForFinished();
}
208
209template <typename T>
210void stretchChannels(T *input_buffer, QImage *output_image,
211 const StretchParams &stretch_params,
212 int input_range, int image_height, int image_width, int num_channels, int sampling)
213{
214 if (num_channels == 1)
215 stretchOneChannel(input_buffer, output_image, stretch_params, input_range,
216 image_height, image_width, sampling);
217 else if (num_channels == 3)
218 stretchThreeChannels(input_buffer, output_image, stretch_params, input_range,
219 image_height, image_width, sampling);
220}
221
222// See section 8.5.7 in above link https://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html
223template <typename T>
224void computeParamsOneChannel(T const *buffer, StretchParams1Channel *params,
225 int inputRange, int height, int width)
226{
227 // Find the median sample.
228 constexpr int maxSamples = 500000;
229 const int sampleBy = width * height < maxSamples ? 1 : width * height / maxSamples;
230
231 T medianSample = median(buffer, width * height, sampleBy);
232 // Find the Median deviation: 1.4826 * median of abs(sample[i] - median).
233 const int numSamples = width * height / sampleBy;
234 std::vector<T> deviations(numSamples);
235 for (int index = 0, i = 0; i < numSamples; ++i, index += sampleBy)
236 {
237 if (medianSample > buffer[index])
238 deviations[i] = medianSample - buffer[index];
239 else
240 deviations[i] = buffer[index] - medianSample;
241 }
242
243 // Shift everything to 0 -> 1.0.
244 const float medDev = median(deviations);
245 const float normalizedMedian = medianSample / static_cast<float>(inputRange);
246 const float MADN = 1.4826 * medDev / static_cast<float>(inputRange);
247
248 const bool upperHalf = normalizedMedian > 0.5;
249
250 const float shadows = (upperHalf || MADN == 0) ? 0.0 :
251 fmin(1.0, fmax(0.0, (normalizedMedian + -2.8 * MADN)));
252
253 const float highlights = (!upperHalf || MADN == 0) ? 1.0 :
254 fmin(1.0, fmax(0.0, (normalizedMedian - -2.8 * MADN)));
255
256 float X, M;
257 constexpr float B = 0.25;
258 if (!upperHalf)
259 {
260 X = normalizedMedian - shadows;
261 M = B;
262 }
263 else
264 {
265 X = B;
266 M = highlights - normalizedMedian;
267 }
268 float midtones;
269 if (X == 0) midtones = 0.0f;
270 else if (X == M) midtones = 0.5f;
271 else if (X == 1) midtones = 1.0f;
272 else midtones = ((M - 1) * X) / ((2 * M - 1) * X - M);
273
274 // Store the params.
275 params->shadows = shadows;
276 params->highlights = highlights;
277 params->midtones = midtones;
278 params->shadows_expansion = 0.0;
279 params->highlights_expansion = 1.0;
280}
281
282// Need to know the possible range of input values.
283// Using the type of the sample and guessing.
284// Perhaps we should examine the contents for the file
285// (e.g. look at maximum value and extrapolate from that).
286int getRange(int data_type)
287{
288 switch (data_type)
289 {
290 case TBYTE:
291 return 256;
292 case TSHORT:
293 return 64 * 1024;
294 case TUSHORT:
295 return 64 * 1024;
296 case TLONG:
297 return 64 * 1024;
298 case TFLOAT:
299 return 64 * 1024;
300 case TLONGLONG:
301 return 64 * 1024;
302 case TDOUBLE:
303 return 64 * 1024;
304 default:
305 return 64 * 1024;
306 }
307}
308
309} // namespace
310
311Stretch::Stretch(int width, int height, int channels, int data_type)
312{
313 image_width = width;
314 image_height = height;
315 image_channels = channels;
316 dataType = data_type;
317 input_range = getRange(dataType);
318}
319
// Stretches the raw input buffer into the pre-allocated 8-bit outputImage,
// dispatching on the FITS data type so the buffer is reinterpreted correctly.
// outputImage must already be sized for the requested sampling (asserted below).
void Stretch::run(uint8_t const *input, QImage *outputImage, int sampling)
{
    // Output dimensions are the input dimensions divided by sampling, rounded up.
    Q_ASSERT(outputImage->width() == (image_width + sampling - 1) / sampling);
    Q_ASSERT(outputImage->height() == (image_height + sampling - 1) / sampling);
    // May collapse input_range to 1 for float/double data that lies in [0, 1].
    recalculateInputRange(input);

    switch (dataType)
    {
        case TBYTE:
            stretchChannels(reinterpret_cast<uint8_t const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TSHORT:
            stretchChannels(reinterpret_cast<short const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TUSHORT:
            stretchChannels(reinterpret_cast<unsigned short const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TLONG:
            stretchChannels(reinterpret_cast<long const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TFLOAT:
            stretchChannels(reinterpret_cast<float const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TLONGLONG:
            stretchChannels(reinterpret_cast<long long const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TDOUBLE:
            stretchChannels(reinterpret_cast<double const*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        default:
            // Unsupported data type: leave outputImage untouched.
            break;
    }
}
360
361// The input range for float/double is ambiguous, and we can't tell without the buffer,
362// so we set it to 64K and possibly reduce it when we see the data.
363void Stretch::recalculateInputRange(uint8_t const *input)
364{
365 if (input_range <= 1) return;
366 if (dataType != TFLOAT && dataType != TDOUBLE) return;
367
368 float mx = 0;
369 if (dataType == TFLOAT)
370 mx = sampledMax(reinterpret_cast<float const*>(input), image_height * image_width, 1000);
371 else if (dataType == TDOUBLE)
372 mx = sampledMax(reinterpret_cast<double const*>(input), image_height * image_width, 1000);
373 if (mx <= 1.01f) input_range = 1;
374}
375
// Computes auto-stretch parameters for each channel of the input buffer.
// grey_red holds the parameters for channel 0 (grey or red), then green and
// blue for channels 1 and 2. Channels are stored planar (each channel's plane
// is image_width * image_height samples, one after another).
StretchParams Stretch::computeParams(uint8_t const *input)
{
    // May collapse input_range to 1 for float/double data that lies in [0, 1].
    recalculateInputRange(input);
    StretchParams result;
    for (int channel = 0; channel < image_channels; ++channel)
    {
        // Offset of this channel's plane, in samples (not bytes); the typed
        // pointer arithmetic below scales it by the sample size.
        int offset = channel * image_width * image_height;
        StretchParams1Channel *params = channel == 0 ? &result.grey_red :
                                        (channel == 1 ? &result.green : &result.blue);
        // Dispatch on the FITS data type so the buffer is reinterpreted correctly.
        switch (dataType)
        {
            case TBYTE:
            {
                auto buffer = reinterpret_cast<uint8_t const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TSHORT:
            {
                auto buffer = reinterpret_cast<short const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TUSHORT:
            {
                auto buffer = reinterpret_cast<unsigned short const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TLONG:
            {
                auto buffer = reinterpret_cast<long const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TFLOAT:
            {
                auto buffer = reinterpret_cast<float const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TLONGLONG:
            {
                auto buffer = reinterpret_cast<long long const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            case TDOUBLE:
            {
                auto buffer = reinterpret_cast<double const*>(input);
                computeParamsOneChannel(buffer + offset, params, input_range,
                                        image_height, image_width);
                break;
            }
            default:
                // Unsupported data type: the channel's params stay default-initialized.
                break;
        }
    }
    return result;
}
/* Scraped doxygen footer, preserved here as a comment so the file stays valid C++:
   QImage::height() const, QImage::scanLine(int), QImage::width() const,
   QList::append(), QtConcurrent::run() — see the Qt documentation.
   This file is part of the KDE documentation.
   Documentation copyright 1996-2024 The KDE developers.
   Generated on Mon Nov 4 2024 16:38:43 by doxygen 1.12.0 written by Dimitri van Heesch, 1997-2006.
   KDE's Doxygen guidelines are available online. */