Line data Source code
1 : //# Copyright (C) 1997-2010
2 : //# Associated Universities, Inc. Washington DC, USA.
3 : //#
4 : //# This library is free software; you can redistribute it and/or modify it
5 : //# under the terms of the GNU Library General Public License as published by
6 : //# the Free Software Foundation; either version 2 of the License, or (at your
7 : //# option) any later version.
8 : //#
9 : //# This library is distributed in the hope that it will be useful, but WITHOUT
10 : //# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 : //# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
12 : //# License for more details.
13 : //#
14 : //# You should have received a copy of the GNU Library General Public License
15 : //# along with this library; if not, write to the Free Software Foundation,
16 : //# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
17 : //#
18 : //# Correspondence concerning AIPS++ should be addressed as follows:
19 : //# Internet email: aips2-request@nrao.edu.
20 : //# Postal address: AIPS++ Project Office
21 : //# National Radio Astronomy Observatory
22 : //# 520 Edgemont Road
23 : //# Charlottesville, VA 22903-2475 USA
24 : //#
25 : //# $Id: $AspMatrixCleaner.cc
26 :
27 : // Same include list as in MatrixCleaner.cc
28 : #include <casacore/casa/Arrays/Matrix.h>
29 : #include <casacore/casa/Arrays/Cube.h>
30 : #include <casacore/casa/Arrays/ArrayMath.h>
31 : #include <casacore/casa/Arrays/MatrixMath.h>
32 : //#include <casa/Arrays/ArrayIO.h>
33 : #include <casacore/casa/BasicMath/Math.h>
34 : #include <casacore/casa/BasicSL/Complex.h>
35 : #include <casacore/casa/Logging/LogIO.h>
36 : #include <casacore/casa/OS/File.h>
37 : #include <casacore/casa/Containers/Record.h>
38 :
39 : #include <casacore/lattices/LRegions/LCBox.h>
40 : #include <casacore/casa/Arrays/Slicer.h>
41 : #include <casacore/scimath/Mathematics/FFTServer.h>
42 : #include <casacore/casa/OS/HostInfo.h>
43 : #include <casacore/casa/Arrays/ArrayError.h>
44 : #include <casacore/casa/Arrays/ArrayIter.h>
45 : #include <casacore/casa/Arrays/VectorIter.h>
46 :
47 : #include <casacore/casa/Utilities/GenSort.h>
48 : #include <casacore/casa/BasicSL/String.h>
49 : #include <casacore/casa/Utilities/Assert.h>
50 : #include <casacore/casa/Utilities/Fallible.h>
51 :
52 : #include <casacore/casa/BasicSL/Constants.h>
53 :
54 : #include <casacore/casa/Logging/LogSink.h>
55 : #include <casacore/casa/Logging/LogMessage.h>
56 :
57 : #include <synthesis/MeasurementEquations/MatrixCleaner.h>
58 : #include <synthesis/TransformMachines/StokesImageUtil.h>
59 : #include <synthesis/TransformMachines2/Utils.h>
60 : #include <casacore/coordinates/Coordinates/TabularCoordinate.h>
61 :
62 : #ifdef _OPENMP
63 : #include <omp.h>
64 : #endif
65 :
66 : // Additional include files
67 : #include <algorithm>
68 : #include <chrono>
69 :
70 : #include <synthesis/MeasurementEquations/AspMatrixCleaner.h>
71 :
72 : // for alglib
73 : #include <synthesis/MeasurementEquations/objfunc_alglib.h>
74 : using namespace alglib;
75 :
76 : using namespace casacore;
77 : using namespace std::chrono;
78 : namespace casa { //# NAMESPACE CASA - BEGIN
79 0 : AspMatrixCleaner::AspMatrixCleaner():
80 : MatrixCleaner(),
81 : itsInitScaleSizes(0),
82 : //itsAspScaleSizes(0),
83 : //itsAspAmplitude(0),
84 : itsNInitScales(5),
85 : itsPsfWidth(0.0),
86 : itsUseZhang(false),
87 : itsSwitchedToHogbom(false),
88 : itsNumHogbomIter(0),
89 : itsNthHogbom(0),
90 : itsOptimumScale(0),
91 : itsOptimumScaleSize(0.0),
92 : itsPeakResidual(1000.0), // temp. should this be changed to MAX?
93 : itsPrevPeakResidual(0.0),
94 : itsOrigDirty( ),
95 : itsFusedThreshold(0.0),
96 : itsNumNoChange(0),
97 : itsBinSizeForSumFlux(4),
98 0 : itsUserLargestScale(-1.0)
99 : {
100 0 : itsInitScales.resize(0);
101 0 : itsInitScaleXfrs.resize(0);
102 0 : itsDirtyConvInitScales.resize(0);
103 0 : itsInitScaleMasks.resize(0);
104 0 : itsPsfConvInitScales.resize(0);
105 0 : itsNumIterNoGoodAspen.resize(0);
106 : //itsAspCenter.resize(0);
107 0 : itsGoodAspActiveSet.resize(0);
108 0 : itsGoodAspAmplitude.resize(0);
109 0 : itsGoodAspCenter.resize(0);
110 : //itsPrevAspActiveSet.resize(0);
111 : //itsPrevAspAmplitude.resize(0);
112 0 : itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024); // memoryUsed() is in kB; /1024 gives MB
113 0 : itsNormMethod = casa::refim::SynthesisUtils::getenv("ASP_NORM", itsDefaultNorm);
114 0 : }
115 :
116 0 : AspMatrixCleaner::~AspMatrixCleaner()
117 : {
118 0 : destroyAspScales();
119 0 : destroyInitMasks();
120 0 : destroyInitScales();
121 0 : if(!itsMask.null())
122 0 : itsMask=0;
123 0 : }
124 :
125 0 : Bool AspMatrixCleaner::setaspcontrol(const Int niter,
126 : const Float gain,
127 : const Quantity& aThreshold,
128 : const Quantity& fThreshold)
129 : {
130 0 : itsMaxNiter=niter;
131 0 : itsGain=gain;
132 0 : itsThreshold=aThreshold;
133 0 : itsFracThreshold=fThreshold;
134 0 : return true;
135 : }
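// Illustrative usage sketch (hypothetical values, not taken from this file):
//   AspMatrixCleaner cleaner;
//   cleaner.setaspcontrol(1000, 0.1, Quantity(0.0, "Jy"), Quantity(0.1, "%"));
// i.e. at most 1000 iterations, a loop gain of 0.1, an absolute flux threshold
// of 0 Jy, and a fractional threshold of 0.1%.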
136 :
137 : // Do the clean as set up
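// Summary of the return codes set in the body below:
//   1  = converged (stopping threshold, strength threshold, or Hogbom finished
//        close to convergence)
//   0  = iteration budget exhausted without reaching the stopping threshold
//   -1 = stopped early by stop-point mode
//   -2 = negative component found on the largest scale
//   -3 = apparent divergence of the peak residual / optimum strength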
138 0 : Int AspMatrixCleaner::aspclean(Matrix<Float>& model,
139 : Bool /*showProgress*/)
140 : {
141 0 : AlwaysAssert(model.shape()==itsDirty->shape(), AipsError);
142 :
143 0 : LogIO os(LogOrigin("AspMatrixCleaner", "aspclean()", WHERE));
144 0 : os << LogIO::NORMAL1 << "Asp clean algorithm" << LogIO::POST;
145 :
146 :
147 : //Int scale;
148 :
149 0 : AlwaysAssert(itsScalesValid, AipsError);
150 : // no need to use all cores; cap the OpenMP thread count at the number of scales
151 0 : Int nth = itsNscales;
152 : #ifdef _OPENMP
153 :
154 0 : nth = min(nth, omp_get_max_threads());
155 :
156 : #endif
157 :
158 : // Define a subregion for the inner quarter. No longer used
159 : /*IPosition blcDirty(model.shape().nelements(), 0);
160 : IPosition trcDirty(model.shape()-1);
161 :
162 : if(!itsMask.null())
163 : {
164 : os << "Cleaning using given mask" << LogIO::POST;
165 : if (itsMaskThreshold < 0)
166 : {
167 : os << LogIO::NORMAL
168 : << "Mask thresholding is not used, values are interpreted as weights"
169 : <<LogIO::POST;
170 : }
171 : else
172 : {
173 : // a mask that does not allow for clean was sent
174 : if(noClean_p)
175 : return 0;
176 :
177 : os << LogIO::NORMAL
178 : << "Cleaning pixels with mask values above " << itsMaskThreshold
179 : << LogIO::POST;
180 : }
181 :
182 : Int nx=model.shape()(0);
183 : Int ny=model.shape()(1);
184 :
185 : AlwaysAssert(itsMask->shape()(0)==nx, AipsError);
186 : AlwaysAssert(itsMask->shape()(1)==ny, AipsError);
187 : Int xbeg=nx-1;
188 : Int ybeg=ny-1;
189 : Int xend=0;
190 : Int yend=0;
191 : for (Int iy=0;iy<ny;iy++)
192 : {
193 : for (Int ix=0;ix<nx;ix++)
194 : {
195 : if((*itsMask)(ix,iy)>0.000001)
196 : {
197 : xbeg=min(xbeg,ix);
198 : ybeg=min(ybeg,iy);
199 : xend=max(xend,ix);
200 : yend=max(yend,iy);
201 : }
202 : }
203 : }
204 :
205 : if (!itsIgnoreCenterBox) // this is false
206 : {
207 : if((xend - xbeg)>nx/2)
208 : {
209 : xbeg=nx/4-1; //if larger than quarter take inner of mask
210 : os << LogIO::WARN << "Mask span over more than half the x-axis: Considering inner half of the x-axis" << LogIO::POST;
211 : }
212 : if((yend - ybeg)>ny/2)
213 : {
214 : ybeg=ny/4-1;
215 : os << LogIO::WARN << "Mask span over more than half the y-axis: Considering inner half of the y-axis" << LogIO::POST;
216 : }
217 : xend=min(xend,xbeg+nx/2-1);
218 : yend=min(yend,ybeg+ny/2-1);
219 : }
220 :
221 : blcDirty(0)=xbeg;
222 : blcDirty(1)=ybeg;
223 : trcDirty(0)=xend;
224 : trcDirty(1)=yend;
225 : }
226 : else
227 : {
228 : if (itsIgnoreCenterBox) {
229 : os << LogIO::NORMAL << "Cleaning entire image" << LogIO::POST;
230 : os << LogIO::NORMAL1 << "as per MF/WF" << LogIO::POST; // ???
231 : }
232 : else {
233 : os << "Cleaning inner quarter of the image" << LogIO::POST;
234 : for (Int i=0;i<Int(model.shape().nelements());i++)
235 : {
236 : blcDirty(i)=model.shape()(i)/4;
237 : trcDirty(i)=blcDirty(i)+model.shape()(i)/2-1;
238 : if(trcDirty(i)<0)
239 : trcDirty(i)=1;
240 : }
241 : }
242 : }
243 : LCBox centerBox(blcDirty, trcDirty, model.shape());*/
244 :
245 : // Start the iteration
246 0 : Float totalFlux=0.0;
247 0 : Int converged=0;
248 0 : Int stopPointModeCounter = 0;
249 0 : Float tmpMaximumResidual = 0.0;
250 0 : Float minMaximumResidual = 1000.0;
251 0 : Float initRMSResidual = 1000.0;
252 0 : float initModelFlux = 0.0;
253 :
254 0 : os <<LogIO::NORMAL3<< "Starting iteration"<< LogIO::POST;
255 0 : vector<Float> tempScaleSizes;
256 0 : itsIteration = itsStartingIter; // 0
257 :
258 0 : Matrix<Float> itsScale0 = Matrix<Float>(psfShape_p);
259 0 : Matrix<Complex>itsScaleXfr0 = Matrix<Complex> ();
260 :
261 0 : Matrix<Float> itsScale = Matrix<Float>(psfShape_p);
262 0 : Matrix<Complex>itsScaleXfr = Matrix<Complex> ();
263 :
264 : // Define a subregion so that the peak is centered
265 0 : IPosition support(model.shape());
266 0 : support(0) = max(Int(itsInitScaleSizes[itsNInitScales-1] + 0.5), support(0));
267 0 : support(1) = max(Int(itsInitScaleSizes[itsNInitScales-1] + 0.5), support(1));
268 0 : IPosition inc(model.shape().nelements(), 1);
269 :
270 : // get init peakres
271 : // this is important so we have correct peakres for each channel in cube imaging
272 0 : Float maxVal=0;
273 0 : IPosition posmin((*itsDirty).shape().nelements(), 0);
274 0 : Float minVal=0;
275 0 : IPosition posmax((*itsDirty).shape().nelements(), 0);
276 0 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
277 0 : itsPeakResidual = (fabs(maxVal) > fabs(minVal)) ? fabs(maxVal) : fabs(minVal);
278 :
279 0 : vector<Float> vecItsStrengthOptimum;
280 0 : vector<Float> vecItsOptimumScaleSize;
281 0 : vecItsStrengthOptimum.clear();
282 0 : vecItsOptimumScaleSize.clear();
283 :
284 : // calculate the mean-squared residual (called "rms" here; no sqrt is taken since it is only used for relative comparisons)
285 0 : float rms = 0.0;
286 0 : int num = int(model.shape()(0) * model.shape()(1));
287 0 : for (int j = 0; j < model.shape()(1); ++j)
288 : {
289 0 : for (int i = 0; i < model.shape()(0); ++i)
290 : {
291 0 : rms += pow((*itsDirty)(i, j), 2);
292 : }
293 : }
294 0 : rms = rms / num;
295 0 : initRMSResidual = rms;
296 : //os << LogIO::NORMAL3 << "initial rms residual " << initRMSResidual << LogIO::POST;
297 :
298 0 : initModelFlux = sum(model);
299 : //os << LogIO::NORMAL3 << "initial model flux " << initModelFlux << LogIO::POST;
300 :
301 0 : for (Int ii = itsStartingIter; ii < itsMaxNiter; ii++)
302 : {
303 : //cout << "cur iter " << itsIteration << " max iter is "<< itsMaxNiter << endl;
304 0 : itsIteration++;
305 :
306 : // recompute the mean-squared ("rms") residual for this iteration
307 0 : rms = 0.0;
308 0 : for (int j = 0; j < model.shape()(1); ++j)
309 : {
310 0 : for (int i = 0; i < model.shape()(0); ++i)
311 : {
312 0 : rms += pow((*itsDirty)(i, j), 2);
313 : }
314 : }
315 0 : rms = rms / num;
316 :
317 : // make single optimized scale image
318 0 : os << LogIO::NORMAL3 << "Making optimized scale " << itsOptimumScaleSize << " at location " << itsPositionOptimum << LogIO::POST;
319 :
320 0 : if (itsSwitchedToHogbom)
321 : {
322 0 : makeScaleImage(itsScale0, 0.0, itsStrengthOptimum, itsPositionOptimum);
323 0 : fft.fft0(itsScaleXfr0, itsScale0);
324 0 : itsScale = 0.0;
325 0 : itsScale = itsScale0;
326 0 : itsScaleXfr.resize();
327 0 : itsScaleXfr = itsScaleXfr0;
328 0 : vecItsStrengthOptimum.push_back(itsStrengthOptimum);
329 0 : vecItsOptimumScaleSize.push_back(0);
330 : }
331 : else
332 : {
333 0 : makeScaleImage(itsScale, itsOptimumScaleSize, itsStrengthOptimum, itsPositionOptimum);
334 0 : itsScaleXfr.resize();
335 0 : fft.fft0(itsScaleXfr, itsScale);
336 0 : vecItsStrengthOptimum.push_back(itsStrengthOptimum);
337 0 : vecItsOptimumScaleSize.push_back(itsOptimumScaleSize);
338 : }
339 :
340 : // trigger hogbom when
341 : // (1) itsStrengthOptimum is small enough & peakres rarely changes or itsPeakResidual is small enough
342 : // (2) peakres rarely changes
343 0 : if (itsNormMethod == 1) // only Norm Method 1 needs hogbom for speedup
344 : {
345 : //if (!itsSwitchedToHogbom && abs(itsStrengthOptimum) < 0.001) // M31 value - new Asp + gaussian
346 : //if (!itsSwitchedToHogbom && abs(itsStrengthOptimum) < 2.8) // M31 value-new asp: 1k->5k
347 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 8e-5 && abs(itsStrengthOptimum) < 1e-4) // G55
348 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 7e-3 && abs(itsStrengthOptimum) < 1e-7) // Points
349 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 0.138) //GSL M100 channel 22
350 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 0.15) // GSL M100 channel 22 & 23 & others
351 : //if (!itsSwitchedToHogbom && (abs(itsPeakResidual) < 2.5 || abs(itsStrengthOptimum) < 1e-3)) // GSL, CygA
352 :
353 : /*if(!itsSwitchedToHogbom && (abs(itsPeakResidual) < itsFusedThreshold
354 : || abs(itsStrengthOptimum) < (5e-4 * itsFusedThreshold)))*/ // GSL, CygA.
355 0 : if(!itsSwitchedToHogbom && (abs(itsPeakResidual) < itsFusedThreshold
356 0 : || ((abs(itsStrengthOptimum) < (5e-4 * itsFusedThreshold)) && (itsNumNoChange >= 2))))
357 : // 5e-4 is an experimental number; below that threshold itsStrengthOptimum is assumed to be too small to take effect.
358 : {
359 0 : os << LogIO::NORMAL3 << "Switch to hogbom b/c peak residual or optimum strength is small enough: " << itsFusedThreshold << LogIO::POST;
360 :
361 0 : bool runlong = false;
362 :
363 : //option 1: use rms residual to detect convergence
364 0 : if (initRMSResidual > rms && initRMSResidual/rms < 1.5)
365 : {
366 0 : runlong = true;
367 0 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. initial rms " << initRMSResidual << " rms " << rms << LogIO::POST;
368 : }
369 : //option 2: use model flux to detect convergence
370 : /*float modelFlux = 0.0;
371 : modelFlux = sum(model);
372 : if (initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01))
373 : {
374 : runlong = true;
375 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. init model flux " << initModelFlux << " model flux " << modelFlux << LogIO::POST;
376 : }*/
377 :
378 0 : switchedToHogbom(runlong);
379 :
380 0 : if (itsNumNoChange >= 2)
381 0 : itsNumNoChange = 0;
382 : }
383 0 : if (!itsSwitchedToHogbom && itsNumNoChange >= 2)
384 : {
385 0 : os << LogIO::NORMAL3 << "Switched to hogbom at iteration "<< ii << " b/c peakres rarely changes" << LogIO::POST;
386 0 : itsNumNoChange = 0;
387 :
388 0 : os << LogIO::NORMAL3 << "total flux " << totalFlux << " model flux " << sum(model) << LogIO::POST;
389 0 : bool runlong = false;
390 :
391 : //option 1: use rms residual to detect convergence
392 0 : if (initRMSResidual > rms && initRMSResidual/rms < 1.5)
393 : {
394 0 : runlong = true;
395 0 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. initial rms " << initRMSResidual << " rms " << rms << LogIO::POST;
396 : }
397 : //option 2: use model flux to detect convergence
398 : /*float modelFlux = 0.0;
399 : modelFlux = sum(model);
400 : if (initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01))
401 : {
402 : runlong = true;
403 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. init model flux " << initModelFlux << " model flux " << modelFlux << LogIO::POST;
404 : }*/
405 :
406 0 : switchedToHogbom(runlong);
407 : }
408 : }
409 :
410 0 : if (!itsSwitchedToHogbom)
411 : {
412 0 : if (itsNumIterNoGoodAspen.size() >= 10)
413 0 : itsNumIterNoGoodAspen.pop_front(); // only track the past 10 iters
414 0 : if (itsOptimumScaleSize == 0)
415 0 : itsNumIterNoGoodAspen.push_back(1); // Zhang 2018 fused-Asp approach
416 : else
417 0 : itsNumIterNoGoodAspen.push_back(0);
418 : }
419 :
420 : // Now add to the total flux
421 0 : totalFlux += (itsStrengthOptimum*itsGain);
422 0 : itsTotalFlux = totalFlux;
423 :
424 0 : if(ii == itsStartingIter)
425 : {
426 0 : itsMaximumResidual = abs(itsPeakResidual);
427 0 : tmpMaximumResidual = itsMaximumResidual;
428 0 : os <<LogIO::NORMAL3 << "Initial maximum residual is " << itsMaximumResidual;
429 0 : if( !itsMask.null() )
430 0 : os <<LogIO::NORMAL3 << " within the mask ";
431 :
432 0 : os <<LogIO::NORMAL3 << LogIO::POST;
433 : }
434 :
435 : //save the min peak residual
436 0 : if (abs(minMaximumResidual) > abs(itsPeakResidual))
437 0 : minMaximumResidual = abs(itsPeakResidual);
438 :
439 : // Various ways of stopping:
440 : // 0. stop if below the cycle threshold (same as MS-Clean)
441 0 : if (abs(itsPeakResidual) < threshold())
442 : {
443 0 : os << "Reached stopping threshold " << threshold() << " at iteration "<<
444 0 : ii << LogIO::POST;
445 0 : os << "peakres is " << abs(itsPeakResidual) << LogIO::POST;
446 0 : converged = 1;
447 0 : itsSwitchedToHogbom = false;
448 0 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", model flux " << sum(model) << LogIO::POST;
449 :
450 0 : break;
451 : }
452 : // 1. stop if below threshold. 1e-6 is an experimental number
453 0 : if (abs(itsStrengthOptimum) < (1e-6 * itsFusedThreshold))
454 : {
455 : //cout << "Reached stopping threshold " << 1e-6 * itsFusedThreshold << " at iteration "<< ii << endl;
456 0 : os << LogIO::NORMAL3 << "Reached stopping threshold " << 1e-6 * itsFusedThreshold << " at iteration "<<
457 0 : ii << LogIO::POST;
458 0 : os <<LogIO::NORMAL3 << "Optimum flux is " << abs(itsStrengthOptimum) << LogIO::POST;
459 0 : converged = 1;
460 0 : itsSwitchedToHogbom = false;
461 0 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", modelflux " << sum(model) << LogIO::POST;
462 :
463 0 : break;
464 : }
465 : // 2. negatives on largest scale?
466 0 : if ((itsNscales > 1) && itsStopAtLargeScaleNegative &&
467 0 : itsOptimumScale == (itsNInitScales - 1) &&
468 0 : itsStrengthOptimum < 0.0)
469 :
470 : {
471 0 : os <<LogIO::NORMAL3 << "Reached negative on largest scale" << LogIO::POST;
472 0 : converged = -2;
473 0 : break;
474 : }
475 : // 3. stop point mode at work
476 0 : if (itsStopPointMode > 0)
477 : {
478 0 : if (itsOptimumScale == 0)
479 0 : stopPointModeCounter++;
480 : else
481 0 : stopPointModeCounter = 0;
482 :
483 0 : if (stopPointModeCounter >= itsStopPointMode)
484 : {
485 : os <<LogIO::NORMAL3 << "Cleaned " << stopPointModeCounter <<
486 : " consecutive components from the smallest scale, stopping prematurely"
487 0 : << LogIO::POST;
488 0 : itsDidStopPointMode = true;
489 0 : converged = -1;
490 0 : break;
491 : }
492 : }
493 : //4. Diverging large scale
494 : //If the actual value is 50% above the maximum residual, there is a good chance it will not recover at this stage
495 : /*if(((abs(itsStrengthOptimum)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0))
496 : && !(itsStopAtLargeScaleNegative))
497 : {
498 : cout << "Diverging due to large scale?" << endl;
499 : os <<LogIO::NORMAL3 << "Diverging due to large scale?" << LogIO::POST;
500 : os <<LogIO::NORMAL3 << "itsStrengthOptimum " << itsStrengthOptimum << " tmp " << tmpMaximumResidual << LogIO::POST;
501 : //clean is diverging most probably due to the large scale
502 : converged=-2;
503 : break;
504 : }*/
505 : //5. Diverging for some other reason; may just need another CS-style reconciling
506 0 : if((abs(itsStrengthOptimum)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0) ||
507 0 : (abs(itsPeakResidual)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0) ||
508 0 : (abs(itsPeakResidual)-abs(minMaximumResidual)) > (abs(minMaximumResidual)/2.0))
509 : {
510 0 : os << LogIO::NORMAL3 << "Diverging due to unknown reason" << LogIO::POST;
511 0 : os << LogIO::NORMAL3 << "tmpMaximumResidual " << abs(tmpMaximumResidual) << " itsStrengthOptimum " << abs(itsStrengthOptimum) << " itsPeakResidual " << abs(itsPeakResidual) << LogIO::POST;
512 0 : os << LogIO::NORMAL3 << "minMaximumResidual " << abs(minMaximumResidual) << LogIO::POST;
513 :
514 0 : converged=-3;
515 0 : itsSwitchedToHogbom = false;
516 0 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", modelflux " << sum(model) << LogIO::POST;
517 :
518 0 : break;
519 : }
520 :
521 0 : if (itsIteration == itsStartingIter + 1)
522 0 : os <<LogIO::NORMAL3 << "iteration MaximumResidual CleanedFlux" << LogIO::POST;
523 0 : if ((itsIteration % (itsMaxNiter/10 > 0 ? itsMaxNiter/10 : 1)) == 0)
524 : {
525 : //Good place to re-up the fiducial maximum residual
526 : //tmpMaximumResidual=abs(itsStrengthOptimum);
527 0 : os <<LogIO::NORMAL3 << itsIteration <<" "<< abs(itsPeakResidual) <<" "
528 0 : << totalFlux <<LogIO::POST;
529 : }
530 :
531 :
532 0 : IPosition blc(itsPositionOptimum - support/2);
533 0 : IPosition trc(itsPositionOptimum + support/2 - 1);
534 : // try 2.5 sigma
535 : /*Int sigma5 = (Int)(5 * itsOptimumScaleSize / 2);
536 : IPosition blc(itsPositionOptimum - sigma5);
537 : IPosition trc(itsPositionOptimum + sigma5 -1);*/
538 :
539 0 : LCBox::verify(blc, trc, inc, model.shape());
540 0 : IPosition blcPsf(blc);
541 0 : IPosition trcPsf(trc);
542 : //blcDirty = blc; // update blcDirty/trcDirty is bad for Asp
543 : //trcDirty = trc;
544 :
545 : // Update the model image
546 0 : Matrix<Float> modelSub = model(blc, trc);
547 : Float scaleFactor;
548 0 : scaleFactor = itsGain * itsStrengthOptimum;
549 0 : Matrix<Float> scaleSub = (itsScale)(blcPsf,trcPsf);
550 0 : modelSub += scaleFactor * scaleSub;
551 :
552 : // Now update the residual image
553 : // PSF * model
554 0 : Matrix<Complex> cWork;
555 0 : cWork = ((*itsXfr)*(itsScaleXfr)); //Asp's
556 0 : Matrix<Float> itsPsfConvScale = Matrix<Float>(psfShape_p);
557 0 : fft.fft0(itsPsfConvScale, cWork, false);
558 0 : fft.flip(itsPsfConvScale, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
559 0 : Matrix<Float> psfSub = (itsPsfConvScale)(blcPsf, trcPsf);
560 0 : Matrix<Float> dirtySub=(*itsDirty)(blc,trc);
561 :
562 : /* debug info
563 : float maxvalue;
564 : IPosition peakpos;
565 : findMaxAbs(psfSub, maxvalue, peakpos);
566 : cout << "psfSub pos " << peakpos << " maxval " << psfSub(peakpos) << endl;
567 : cout << "dirtySub pos " << peakpos << " val " << dirtySub(peakpos) << endl;
568 : findMaxAbs(itsPsfConvScale, maxvalue, peakpos);
569 : cout << "itsPsfConvScale pos " << peakpos << " maxval " << itsPsfConvScale(peakpos) << endl;
570 : cout << "itsDirty pos " << peakpos << " val " << (*itsDirty)(peakpos) << endl;
571 : findMaxAbs(dirtySub, maxvalue, peakpos);
572 : cout << "dirtySub pos " << peakpos << " maxval " << dirtySub(peakpos) << endl;
573 : findMaxAbs((*itsDirty), maxvalue, peakpos);
574 : cout << "itsDirty pos " << peakpos << " maxval " << (*itsDirty)(peakpos) << endl;
575 : cout << "itsPositionOptimum " << itsPositionOptimum << endl;
576 : cout << " maxPsfSub " << max(fabs(psfSub)) << " maxPsfConvScale " << max(fabs(itsPsfConvScale)) << " itsGain " << itsGain << endl;*/
577 0 : os <<LogIO::NORMAL3 << "itsStrengthOptimum " << itsStrengthOptimum << LogIO::POST;
578 :
579 : // subtract the peak that we found from the dirty image
580 0 : dirtySub -= scaleFactor * psfSub;
581 :
582 : // further update the model and residual with the remaining aspen of the active-set
583 : // This is no longer needed since we found that using only the last Aspen to update the model/residual is good enough
584 : /*if (itsOptimumScaleSize != 0)
585 : {
586 : bool aspenUnchanged = true;
587 : if ((Int)itsGoodAspActiveSet.size() <= 1)
588 : aspenUnchanged = false;
589 :
590 : for (scale = 0; scale < (Int)itsGoodAspActiveSet.size() - 1; ++scale)
591 : // -1 because we counted the latest aspen in the previous step already
592 : {
593 : if (itsPrevAspActiveSet[scale] == itsGoodAspActiveSet[scale])
594 : continue;
595 :
596 : cout << "I have active-set to adjust" << endl;
597 : aspenUnchanged = false;
598 : // "center" is unchanged for aspen
599 : IPosition blc1(itsGoodAspCenter[scale] - support/2);
600 : IPosition trc1(itsGoodAspCenter[scale] + support/2 - 1);
601 : LCBox::verify(blc1, trc1, inc, model.shape());
602 :
603 : IPosition blcPsf1(blc1);
604 : IPosition trcPsf1(trc1);
605 :
606 : Matrix<Float> modelSub1 = model(blc1, trc1);
607 : Matrix<Float> dirtySub1 = (*itsDirty)(blc1,trc1);
608 :
609 : // First remove the previous values of aspen in the active-set
610 : cout << "aspclean: restore with previous scale " << itsPrevAspActiveSet[scale];
611 : cout << " amp " << itsPrevAspAmplitude[scale] << endl;
612 :
613 : makeScaleImage(itsScale, itsPrevAspActiveSet[scale], itsPrevAspAmplitude[scale], itsGoodAspCenter[scale]);
614 : itsScaleXfr.resize();
615 : fft.fft0(itsScaleXfr, itsScale);
616 : Matrix<Float> scaleSubPrev = (itsScale)(blcPsf1,trcPsf1);
617 : const float scaleFactorPrev = itsGain * itsPrevAspAmplitude[scale];
618 : // restore the model image...
619 : modelSub1 -= scaleFactorPrev * scaleSubPrev;
620 : // restore the residual image
621 : Matrix<Complex> cWorkPrev;
622 : cWorkPrev = ((*itsXfr)*(itsScaleXfr));
623 : Matrix<Float> itsPsfConvScalePrev = Matrix<Float>(psfShape_p);
624 : fft.fft0(itsPsfConvScalePrev, cWorkPrev, false);
625 : fft.flip(itsPsfConvScalePrev, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
626 : Matrix<Float> psfSubPrev = (itsPsfConvScalePrev)(blcPsf1, trcPsf1);
627 : dirtySub1 += scaleFactorPrev * psfSubPrev;
628 :
629 : // Then update with the new values of aspen in the active-set
630 : cout << "aspclean: update with new scale " << itsGoodAspActiveSet[scale];
631 : cout << " amp " << itsGoodAspAmplitude[scale] << endl;
632 : makeScaleImage(itsScale, itsGoodAspActiveSet[scale], itsGoodAspAmplitude[scale], itsGoodAspCenter[scale]);
633 : itsScaleXfr.resize();
634 : fft.fft0(itsScaleXfr, itsScale);
635 : Matrix<Float> scaleSubNew = (itsScale)(blcPsf1,trcPsf1);
636 : const float scaleFactorNew = itsGain * itsGoodAspAmplitude[scale];
637 :
638 : // Now do the addition of the active-set scales to the model image...
639 : modelSub1 += scaleFactorNew * scaleSubNew;
640 : // Now subtract the active-set scales from the residual image
641 : Matrix<Complex> cWorkNew;
642 : cWorkNew = ((*itsXfr)*(itsScaleXfr));
643 : Matrix<Float> itsPsfConvScaleNew = Matrix<Float>(psfShape_p);
644 : fft.fft0(itsPsfConvScaleNew, cWorkNew, false);
645 : fft.flip(itsPsfConvScaleNew, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
646 : Matrix<Float> psfSubNew = (itsPsfConvScaleNew)(blcPsf1, trcPsf1);
647 : dirtySub1 -= scaleFactorNew * psfSubNew;
648 : } //update updating model/residual from aspen in active-set
649 :
650 : // switch to hogbom if aspen is unchanged?
651 : / *if (!itsSwitchedToHogbom && aspenUnchanged)
652 : {
653 : cout << "Switched to hogbom b/c aspen is unchanged" << endl;
654 : switchedToHogbom();
655 : }* /
656 : }*/
657 :
658 : // update peakres
659 0 : itsPrevPeakResidual = itsPeakResidual;
660 0 : maxVal=0;
661 0 : posmin = 0;
662 0 : minVal=0;
663 0 : posmax = 0;
664 0 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
665 0 : itsPeakResidual = (fabs(maxVal) > fabs(minVal)) ? fabs(maxVal) : fabs(minVal);
666 0 : os <<LogIO::NORMAL3 << "current peakres " << itsPeakResidual << LogIO::POST;
667 :
668 0 : if (!itsSwitchedToHogbom &&
669 0 : (fabs(itsPeakResidual - itsPrevPeakResidual) < 1e-4)) //peakres rarely changes
670 0 : itsNumNoChange += 1;
671 : //cout << "after: itsDirty optPos " << (*itsDirty)(itsPositionOptimum) << endl;
672 :
673 : // If we switch to hogbom (i.e. only have 0 scale size),
674 : // we still need to do the following Aspen update to get the new optimumStrength
675 0 : if (itsSwitchedToHogbom)
676 : {
677 0 : if (itsNumHogbomIter == 0)
678 : {
679 0 : itsSwitchedToHogbom = false;
680 :
681 0 : os << LogIO::NORMAL3 << "switched back to Asp." << LogIO::POST;
682 :
683 : //option 1: use rms residual to detect convergence
684 0 : if (!(initRMSResidual > rms && initRMSResidual/rms < 1.5))
685 : {
686 0 : os << "Reached convergence at iteration "<< ii << " b/c hogbom finished" << LogIO::POST;
687 0 : converged = 1;
688 0 : os << LogIO::NORMAL3 << "initial rms " << initRMSResidual << " final rms residual " << rms << LogIO::POST;
689 :
690 0 : break;
691 : }
692 : //option 2: use model flux to detect convergence
693 : /*float modelFlux = 0.0;
694 : modelFlux = sum(model);
695 : if (!(initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01)))
696 : {
697 : os << "Reached convergence at iteration "<< ii << " b/c hogbom finished" << LogIO::POST;
698 : converged = 1;
699 : os << LogIO::NORMAL3 << "initial model flux " << initModelFlux << " final model flux " << modelFlux << LogIO::POST;
700 :
701 : break;
702 : }*/
703 : }
704 : else
705 0 : itsNumHogbomIter -= 1;
706 : }
707 :
708 0 : tempScaleSizes.clear();
709 0 : tempScaleSizes = getActiveSetAspen();
710 0 : tempScaleSizes.push_back(0.0); // put 0 scale
711 0 : defineAspScales(tempScaleSizes);
712 : }
713 : // End of iteration
714 :
715 0 : vector<Float> sumFluxByBins(itsBinSizeForSumFlux,0.0);
716 0 : vector<Float> rangeFluxByBins(itsBinSizeForSumFlux+1,0.0);
717 :
718 0 : getFluxByBins(vecItsOptimumScaleSize,vecItsStrengthOptimum,itsBinSizeForSumFlux,sumFluxByBins,rangeFluxByBins);
719 :
720 :
721 :
722 0 : os << " The number of bins for collecting the sum of Flux: " << itsBinSizeForSumFlux << endl;
723 :
724 0 : for (Int ii = 0; ii < itsBinSizeForSumFlux ; ii++)
725 : {
726 0 : os << " Bin " << ii << " (" << rangeFluxByBins[ii] * itsGain << " , " << rangeFluxByBins[ii+1] * itsGain << "). Sum of Flux: " << sumFluxByBins[ii] * itsGain << LogIO::POST;
727 : }
728 :
729 : // memory used
730 : //itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024);
731 : //cout << "Memory allocated in aspclean " << itsUsedMemoryMB << " MB." << endl;
732 :
733 0 : if(!converged)
734 0 : os << "Failed to reach stopping threshold" << LogIO::POST;
735 :
736 0 : return converged;
737 : }
738 :
739 :
740 0 : Bool AspMatrixCleaner::destroyAspScales()
741 : {
742 0 : destroyInitScales();
743 0 : destroyScales();
744 :
745 0 : for(uInt scale=0; scale < itsDirtyConvInitScales.nelements(); scale++)
746 0 : itsDirtyConvInitScales[scale].resize();
747 :
748 0 : itsDirtyConvInitScales.resize(0, true);
749 :
750 0 : return true;
751 : }
752 :
753 0 : Bool AspMatrixCleaner::destroyInitScales()
754 : {
755 0 : for(uInt scale=0; scale < itsInitScales.nelements(); scale++)
756 0 : itsInitScales[scale].resize();
757 0 : for(uInt scale=0; scale < itsInitScaleXfrs.nelements(); scale++)
758 0 : itsInitScaleXfrs[scale].resize();
759 0 : for(uInt scale=0; scale < itsPsfConvInitScales.nelements(); scale++)
760 0 : itsPsfConvInitScales[scale].resize();
761 :
762 0 : itsInitScales.resize(0, true);
763 0 : itsInitScaleXfrs.resize(0, true);
764 0 : itsPsfConvInitScales.resize(0, true);
765 :
766 0 : return true;
767 : }
768 :
769 0 : Bool AspMatrixCleaner::destroyInitMasks()
770 : {
771 0 : for(uInt scale=0; scale<itsInitScaleMasks.nelements();scale++)
772 0 : itsInitScaleMasks[scale].resize();
773 :
774 0 : itsInitScaleMasks.resize(0);
775 :
776 0 : return true;
777 : }
778 :
779 :
780 0 : float AspMatrixCleaner::getPsfGaussianWidth(ImageInterface<Float>& psf)
781 : {
782 0 : LogIO os( LogOrigin("AspMatrixCleaner","getPsfGaussianWidth",WHERE) );
783 :
784 0 : GaussianBeam beam;
785 : try
786 : {
787 0 : StokesImageUtil::FitGaussianPSF(psf, beam);
788 : }
789 0 : catch(AipsError &x)
790 : {
791 0 : os << "Error in fitting a Gaussian to the PSF : " << x.getMesg() << LogIO::POST;
792 0 : throw( AipsError("Error in fitting a Gaussian to the PSF" + x.getMesg()) );
793 : }
794 :
795 0 : CoordinateSystem cs = psf.coordinates();
796 0 : String dirunit = cs.worldAxisUnits()(0);
797 0 : Vector<String> unitas = cs.worldAxisUnits();
798 0 : unitas(0) = "arcsec";
799 0 : unitas(1) = "arcsec";
800 0 : cs.setWorldAxisUnits(unitas);
801 :
802 0 : os << "major width " << beam.getMajor("arcsec") << " in " << cs.worldAxisUnits()(0) << LogIO::POST;
803 0 : os << "minor width " << beam.getMinor("arcsec") << LogIO::POST;
804 0 : os << " pixel sizes are " << abs(cs.increment()(0)) << " and ";
805 0 : os << abs(cs.increment()(1)) << LogIO::POST;
806 0 : const auto xpixels = beam.getMajor("arcsec") / abs(cs.increment()(0));
807 0 : const auto ypixels = beam.getMinor("arcsec") / abs(cs.increment()(1));
808 0 : os << "xpixels " << xpixels << " ypixels " << ypixels << LogIO::POST;
809 :
810 0 : itsPsfWidth = float(ceil((xpixels + ypixels)/2));
811 0 : os << "PSF width: " << itsPsfWidth << " pixels." << LogIO::POST;
812 :
813 0 : return itsPsfWidth;
814 : }
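// Illustrative example (hypothetical numbers): a fitted beam of 2.0" x 1.0" on
// 0.25" pixels gives xpixels = 8 and ypixels = 4, so the returned width is
// ceil((8 + 4)/2) = 6 pixels.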
815 :
816 : // Make a single initial scale size image by Gaussian
817 0 : void AspMatrixCleaner::makeInitScaleImage(Matrix<Float>& iscale, const Float& scaleSize)
818 : {
819 0 : LogIO os( LogOrigin("AspMatrixCleaner","makeInitScaleImage",WHERE) );
820 :
821 0 : const Int nx = iscale.shape()(0);
822 0 : const Int ny = iscale.shape()(1);
823 0 : iscale = 0.0;
824 :
825 0 : const Double refi = nx/2;
826 0 : const Double refj = ny/2;
827 :
828 0 : if(scaleSize == 0.0)
829 0 : iscale(Int(refi), Int(refj)) = 1.0;
830 : else
831 : {
832 0 : AlwaysAssert(scaleSize>0.0, AipsError);
833 :
834 : /*const Int mini = max(0, (Int)(refi - scaleSize));
835 : const Int maxi = min(nx-1, (Int)(refi + scaleSize));
836 : const Int minj = max(0, (Int)(refj - scaleSize));
837 : const Int maxj = min(ny-1, (Int)(refj + scaleSize));*/
838 0 : os << "Initial scale size " << scaleSize << " pixels." << LogIO::POST;
839 :
840 : //Gaussian2D<Float> gbeam(1.0/(sqrt(2*M_PI)*scaleSize), 0, 0, scaleSize, 1, 0);
841 :
842 : // has to make the whole scale image
843 0 : for (int j = 0; j < ny; j++)
844 : {
845 0 : for (int i = 0; i < nx; i++)
846 : {
847 : //const int px = i - refi;
848 : //const int py = j - refj;
849 : //iscale(i,j) = gbeam(px, py); // gbeam with the above def is equivalent to the following
850 0 : iscale(i,j) = (1.0/(sqrt(2*M_PI)*scaleSize))*exp(-(pow(i-refi,2) + pow(j-refj,2))*0.5/pow(scaleSize,2)); //this is for 1D, but represents Sanjay's and gives good init scale
851 : //iscale(i,j) = (1.0/(2*M_PI*pow(scaleSize,2)))*exp(-(pow(i-refi,2) + pow(j-refj,2))*0.5/pow(scaleSize,2)); // this is for 2D, gives unit area but bad init scale (always picks 0)
852 : }
853 : }
854 :
855 : }
856 0 : }
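// A quick check on the normalization used above (not from the original code):
// the 1-D normalized form (1/(sqrt(2*pi)*sigma)) * exp(-r^2/(2*sigma^2)) has a
// two-dimensional integral of sqrt(2*pi)*sigma, so the total area grows with the
// scale size, whereas the commented-out 2-D form (1/(2*pi*sigma^2)) integrates
// to exactly 1 but was found to give a bad initial scale (always picks 0).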
857 :
858 : // Make a single scale size image by Gaussian
859 0 : void AspMatrixCleaner::makeScaleImage(Matrix<Float>& iscale, const Float& scaleSize, const Float& amp, const IPosition& center)
860 : {
861 :
862 0 : const Int nx = iscale.shape()(0);
863 0 : const Int ny = iscale.shape()(1);
864 0 : iscale = 0.0;
865 :
866 0 : if(scaleSize == 0.0)
867 0 : iscale(Int(center[0]), Int(center[1])) = 1.0;
868 : else
869 : {
870 0 : AlwaysAssert(scaleSize>0.0, AipsError);
871 :
872 : /* const Double refi = nx/2;
873 : const Double refj = ny/2;
874 : const Int mini = max(0, (Int)(refi - scaleSize));
875 : const Int maxi = min(nx-1, (Int)(refi + scaleSize));
876 : const Int minj = max(0, (Int)(refj - scaleSize));
877 : const Int maxj = min(ny-1, (Int)(refj + scaleSize));*/
878 : //cout << "makeScaleImage: scalesize " << scaleSize << " center " << center << " amp " << amp << endl;
879 :
880 : ////Gaussian2D<Float> gbeam(1.0 / (sqrt(2*M_PI)*scaleSize), center[0], center[1], scaleSize, 1, 0);
881 :
882 : // all of the following was trying to replicate Sanjay's code but doesn't work
883 : //const Float d = sqrt(pow(1.0/itsPsfWidth, 2) + pow(1.0/scaleSize, 2));
884 : //Gaussian2D<Float> gbeam(amp / (sqrt(2*M_PI)*scaleSize), center[0], center[1], scaleSize, 1, 0);
885 : //Gaussian2D<Float> gbeam(amp * sqrt(2*M_PI)/d, center[0], center[1], scaleSize * d, 1, 0);
886 : //Gaussian2D<Float> gbeam(amp / (2*M_PI), center[0], center[1], scaleSize, 1, 0);
887 : //Gaussian2D<Float> gbeam(amp / pow(2,scaleSize-1), center[0], center[1], itsInitScaleSizes[scaleSize], 1, 0);
888 :
889 0 : for (int j = 0; j < ny; j++)
890 : {
891 0 : for (int i = 0; i < nx; i++)
892 : {
893 : //const int px = i;
894 : //const int py = j;
895 : // iscale(i,j) = gbeam(px, py); // this is equivalent to the following with the above gbeam definition
896 : // This is for 1D, but represents Sanjay's and gives good init scale
897 : // Note that "amp" is not used in the expression
898 0 : iscale(i,j) = (1.0/(sqrt(2*M_PI)*scaleSize))*exp(-(pow(i-center[0],2) + pow(j-center[1],2))*0.5/pow(scaleSize,2));
899 :
900 : // This is for 2D, gives unit area but bad init scale (always picks 0)
901 : // iscale(i,j) = (1.0/(2*M_PI*pow(scaleSize,2)))*exp(-(pow(i-center[0],2) + pow(j-center[1],2))*0.5/pow(scaleSize,2));
902 : }
903 : }
904 :
905 : }
906 0 : }
907 :
908 :
909 0 : void AspMatrixCleaner::getLargestScaleSize(ImageInterface<Float>& psf)
910 : {
911 0 : LogIO os( LogOrigin("AspMatrixCleaner","getLargestScaleSize",WHERE) );
912 :
913 : //cout << "user's largest scale " << itsUserLargestScale << endl;
914 :
915 0 : itsLargestInitScale = 5.0 * itsPsfWidth; // default in pixels
916 0 : const Int nx = psfShape_p(0);
917 0 : const Int ny = psfShape_p(1);
918 :
919 0 : CoordinateSystem cs = psf.coordinates();
920 0 : String dirunit = cs.worldAxisUnits()(0);
921 0 : Vector<String> unitas = cs.worldAxisUnits();
922 0 : unitas(0) = "arcsec";
923 0 : unitas(1) = "arcsec";
924 0 : cs.setWorldAxisUnits(unitas);
925 :
926 : //cout << "ncoord " << cs.nCoordinates() << endl;
927 : //cout << "coord type " << cs.type(0) << endl;
928 :
929 :
930 0 : const float baseline = 100.0; // default shortest baseline = 100m (for both ALMA and VLA)
931 :
932 0 : for (uInt i = 0; i < cs.nCoordinates(); i++)
933 : {
934 0 : if (cs.type(i) == Coordinate::SPECTRAL)
935 : {
936 0 : SpectralCoordinate speccoord(cs.spectralCoordinate(i));
937 : //Double startfreq = 0.0, startpixel = -0.5;
938 0 : Double endfreq = 0.0, endpixel = 0.5;
939 : //speccoord.toWorld(startfreq, startpixel);
940 0 : speccoord.toWorld(endfreq, endpixel);
941 : //Double midfreq = (endfreq + startfreq) / 2.0;
942 : //cout << "coord i " << i << " MFS end frequency range : " << endfreq/1.0e+9 << " GHz" << endl;
943 :
944 0 : float nu = float(endfreq); // 1e9;
945 : // largest scale = ( (c/nu)/baseline ) converted from radians to degrees to arcsec
946 0 : itsLargestInitScale = float(ceil(((3e+8/nu)/baseline) * 180.0/3.14 * 60.0 * 60.0 / abs(cs.increment()(0))));
947 :
948 : // if user provides largest scale, use it instead.
949 0 : if (itsUserLargestScale > 0)
950 : {
951 0 : if (itsUserLargestScale > itsLargestInitScale)
952 0 : itsStopAtLargeScaleNegative = true;
953 :
954 0 : itsLargestInitScale = itsUserLargestScale;
955 :
956 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
957 0 : if(itsLargestInitScale > min(nx/10, ny/10))
958 : {
959 0 : os << LogIO::WARN << "Scale size of " << itsLargestInitScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
960 0 : itsLargestInitScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
961 : }
962 :
963 0 : return;
964 : }
965 :
966 : // make a conservative largest allowed scale, 5 is a trial number
967 0 : itsLargestInitScale = float(ceil(itsLargestInitScale / 5.0));
968 :
969 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
970 0 : if(itsLargestInitScale > min(nx/10, ny/10))
971 : {
972 0 : os << LogIO::WARN << "Scale size of " << itsLargestInitScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
973 0 : itsLargestInitScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
974 : }
975 :
976 : //cout << "largest scale " << itsLargestInitScale << endl;
977 :
978 0 : return;
979 : }
980 : }
981 :
982 : }
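// Rough worked example of the formula above (illustrative numbers only): at
// nu = 100 GHz with the default 100 m baseline, (c/nu)/baseline = 3e-5 rad,
// i.e. about 6.2 arcsec; with 0.2 arcsec pixels that is ~31 pixels, and the
// conservative /5 factor then leaves an initial largest scale of ~7 pixels
// (unless the user-supplied `largestscale` takes precedence).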
983 :
984 0 : void AspMatrixCleaner::setInitScales()
985 : {
986 0 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScales()", WHERE));
987 :
988 : // if user does not provide the largest scale, use the default.
989 0 : if (itsUserLargestScale < 0)
990 : {
991 0 : itsInitScaleSizes = {0.0f, itsPsfWidth, 2.0f*itsPsfWidth, 4.0f*itsPsfWidth, 8.0f*itsPsfWidth};
992 0 : return;
993 : }
994 : else
995 : {
996 0 : const Int nx = psfShape_p(0);
997 0 : const Int ny = psfShape_p(1);
998 :
999 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
1000 0 : if(itsUserLargestScale> min(nx/10, ny/10))
1001 : {
1002 0 : os << LogIO::WARN << "`largestscale` " << itsUserLargestScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
1003 0 : itsUserLargestScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
1004 : }
1005 :
1006 0 : int numscale = floor(itsUserLargestScale / itsPsfWidth);
1007 0 : if (numscale == 0)
1008 : {
1009 0 : itsNInitScales = 1;
1010 0 : itsInitScaleSizes.resize(itsNInitScales);
1011 0 : itsInitScaleSizes[0] = 0.0f;
1012 0 : os << LogIO::WARN << "`largestscale` " << itsUserLargestScale << " is smaller than the psf width. Only 0 scale is used" << LogIO::POST;
1013 : }
1014 : else
1015 : {
1016 0 : Int scale = 1;
1017 0 : while (((itsPsfWidth * pow(2, scale-1)) < itsUserLargestScale) && (scale < 5))
1018 : {
1019 0 : itsInitScaleSizes.push_back(itsPsfWidth * pow(2, scale-1));
1020 0 : scale++;
1021 : }
1022 :
1023 0 : if (scale <= 4) // restrict the number of init scales based on `largestscale`
1024 0 : itsInitScaleSizes.push_back(itsUserLargestScale);
1025 :
1026 0 : itsNInitScales = itsInitScaleSizes.size();
1027 : }
1028 :
1029 : }
1030 :
1031 : }
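// Illustrative example (hypothetical numbers): with itsPsfWidth = 5 pixels and a
// user-supplied largest scale of 35 pixels, the while loop above adds 5, 10 and
// 20, and the final push_back appends 35; both the doubling and the number of
// added scales are capped by `largestscale` and by the scale < 5 limit.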
1032 :
1033 : /* for shortest baseline approach
1034 : void AspMatrixCleaner::setInitScalesOld()
1035 : {
1036 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScales()", WHERE));
1037 :
1038 : // Validate scales
1039 : //os << "Creating " << itsNInitScales << " initial scales" << LogIO::POST;
1040 : itsInitScaleSizes[0] = 0.0f;
1041 : itsInitScaleSizes[1] = itsPsfWidth;
1042 : //os << "scale 1 = 0.0 pixels" << LogIO::POST;
1043 : //os << "scale 2 = " << itsInitScaleSizes[1] << " pixels" << LogIO::POST;
1044 :
1045 : // based on the normalized power-law distribution of ratio(i) = pow(10.0, (Float(i)-2.0)/2.0) for i >= 1
1046 : // 1. there is enough difference to make 5 scales
1047 : if ((itsLargestInitScale - itsPsfWidth) >= 4.0)
1048 : {
1049 : vector<Float> ratio = {0.09, 0.31, 1.0};
1050 :
1051 : for (Int scale = 2; scale < itsNInitScales; scale++)
1052 : {
1053 : itsInitScaleSizes[scale] =
1054 : ceil(itsPsfWidth + ((itsLargestInitScale - itsPsfWidth) * ratio[scale- 2]));
1055 : //os << "scale " << scale+1 << " = " << itsInitScaleSizes[scale]
1056 : //<< " pixels" << LogIO::POST;
1057 : }
1058 : }
1059 : // 2. there is NOT enough difference to make 5 scales, so just make 4 scales
1060 : else if ((itsLargestInitScale - itsPsfWidth) >= 2.0)
1061 : {
1062 : vector<Float> ratio = {0.31, 1.0};
1063 : itsNInitScales = 4;
1064 : itsInitScaleSizes.resize(itsNInitScales);
1065 :
1066 : for (Int scale = 2; scale < itsNInitScales; scale++)
1067 : {
1068 : itsInitScaleSizes[scale] =
1069 : ceil(itsPsfWidth + ((itsLargestInitScale - itsPsfWidth) * ratio[scale- 2]));
1070 : //os << "scale " << scale+1 << " = " << itsInitScaleSizes[scale]
1071 : //<< " pixels" << LogIO::POST;
1072 : }
1073 : }
1074 : // 3. there is NOT enough difference to make 4 scales, so just make 3 scales
1075 : else
1076 : {
1077 : itsNInitScales = 3;
1078 : itsInitScaleSizes.resize(itsNInitScales);
1079 :
1080 : itsInitScaleSizes[2] = itsLargestInitScale;
1081 : //os << "scale 2" << " = " << itsInitScaleSizes[2] << " pixels" << LogIO::POST;
1082 : }
1083 : }
1084 : */
1085 :
1086 0 : void AspMatrixCleaner::setInitScaleXfrs(const Float width)
1087 : {
1088 0 : if(itsInitScales.nelements() > 0)
1089 0 : destroyAspScales();
1090 :
1091 0 : if (itsSwitchedToHogbom)
1092 : {
1093 0 : itsNInitScales = 1;
1094 0 : itsInitScaleSizes.resize(itsNInitScales, false);
1095 0 : itsInitScaleSizes = {0.0f};
1096 : }
1097 : else
1098 : {
1099 0 : itsNInitScales = 5;
1100 0 : itsInitScaleSizes.resize(itsNInitScales, false);
1101 : // shortest baseline approach below is no longer used (see CAS-940 in Jan 2022). Switched back to the original approach.
1102 : // set initial scale sizes from power-law distribution with min scale=PsfWidth and max scale = c/nu/baseline
1103 : // this step can reset itsNInitScales if the largest scale allowed is small
1104 : //setInitScalesOld();
1105 : // the old approach may cause Asp to pick an unreasonably large scale, but this can be avoided by setting `largestscalesize`.
1106 : // try 0, width, 2width, 4width and 8width which is also restricted by `largestscale` if provided
1107 : //itsInitScaleSizes = {0.0f, width, 2.0f*width, 4.0f*width, 8.0f*width};
1108 0 : setInitScales();
1109 : }
1110 :
1111 0 : itsInitScales.resize(itsNInitScales, false);
1112 0 : itsInitScaleXfrs.resize(itsNInitScales, false);
1113 0 : fft = FFTServer<Float,Complex>(psfShape_p);
1114 0 : for (int scale = 0; scale < itsNInitScales; scale++)
1115 : {
1116 0 : itsInitScales[scale] = Matrix<Float>(psfShape_p);
1117 0 : makeInitScaleImage(itsInitScales[scale], itsInitScaleSizes[scale]);
1118 : //cout << "made itsInitScales[" << scale << "] = " << itsInitScaleSizes[scale] << endl;
1119 0 : itsInitScaleXfrs[scale] = Matrix<Complex> ();
1120 0 : fft.fft0(itsInitScaleXfrs[scale], itsInitScales[scale]);
1121 : }
1122 0 : }
1123 :
1124 : // calculate the convolutions of the psf with the initial scales
1125 0 : void AspMatrixCleaner::setInitScalePsfs()
1126 : {
1127 0 : itsPsfConvInitScales.resize((itsNInitScales+1)*(itsNInitScales+1), false);
1128 0 : itsNscales = itsNInitScales; // # initial scales. This will be updated in defineAspScales later.
1129 :
1130 0 : Matrix<Complex> cWork;
1131 :
1132 0 : for (Int scale=0; scale < itsNInitScales; scale++)
1133 : {
1134 : //cout << "Calculating convolutions of psf for initial scale size " << itsInitScaleSizes[scale] << endl;
1135 : //PSF * scale
1136 0 : itsPsfConvInitScales[scale] = Matrix<Float>(psfShape_p);
1137 0 : cWork=((*itsXfr)*(itsInitScaleXfrs[scale])*(itsInitScaleXfrs[scale]));
1138 0 : fft.fft0((itsPsfConvInitScales[scale]), cWork, false);
1139 0 : fft.flip(itsPsfConvInitScales[scale], false, false);
1140 :
1141 0 : for (Int otherscale = scale; otherscale < itsNInitScales; otherscale++)
1142 : {
1143 0 : AlwaysAssert(index(scale, otherscale) < Int(itsPsfConvInitScales.nelements()),
1144 : AipsError);
1145 :
1146 : // PSF * scale * otherscale
1147 0 : itsPsfConvInitScales[index(scale,otherscale)] = Matrix<Float>(psfShape_p);
1148 0 : cWork=((*itsXfr)*(itsInitScaleXfrs[scale])*(itsInitScaleXfrs[otherscale]));
1149 0 : fft.fft0(itsPsfConvInitScales[index(scale,otherscale)], cWork, false);
1150 : }
1151 : }
1152 0 : }
1153 :
1154 : // Set up the masks for the initial scales (0, width, 2width, 4width and 8width by default)
1155 0 : Bool AspMatrixCleaner::setInitScaleMasks(const Array<Float> arrmask, const Float& maskThreshold)
1156 : {
1157 0 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScaleMasks()", WHERE));
1158 :
1159 0 : destroyMasks();
1160 :
1161 0 : Matrix<Float> mask(arrmask);
1162 0 : itsMask = new Matrix<Float>(mask.shape());
1163 0 : itsMask->assign(mask);
1164 0 : itsMaskThreshold = maskThreshold;
1165 0 : noClean_p=(max(*itsMask) < itsMaskThreshold) ? true : false;
1166 :
1167 0 : if(itsMask.null() || noClean_p)
1168 0 : return false;
1169 :
1170 : // make scale masks
1171 0 : if(itsInitScaleSizes.size() < 1)
1172 : {
1173 : os << "Initial scales are not yet set - cannot set initial scale masks"
1174 0 : << LogIO::EXCEPTION;
1175 : }
1176 :
1177 0 : AlwaysAssert((itsMask->shape() == psfShape_p), AipsError);
1178 :
1179 0 : Matrix<Complex> maskFT;
1180 0 : fft.fft0(maskFT, *itsMask);
1181 0 : itsInitScaleMasks.resize(itsNInitScales);
1182 : // Now we can do all the convolutions
1183 0 : Matrix<Complex> cWork;
1184 0 : for (int scale=0; scale < itsNInitScales; scale++)
1185 : {
1186 : // Mask * scale
1187 : // Allow only 10% overlap by default, hence 0.9 is a default mask threshold
1188 : // if thresholding is not used, just extract the real part of the complex mask
1189 0 : itsInitScaleMasks[scale] = Matrix<Float>(itsMask->shape());
1190 0 : cWork=((maskFT)*(itsInitScaleXfrs[scale]));
1191 0 : fft.fft0(itsInitScaleMasks[scale], cWork, false);
1192 0 : fft.flip(itsInitScaleMasks[scale], false, false);
1193 0 : for (Int j=0 ; j < (itsMask->shape())(1); ++j)
1194 : {
1195 0 : for (Int k =0 ; k < (itsMask->shape())(0); ++k)
1196 : {
1197 0 : if(itsMaskThreshold > 0)
1198 0 : (itsInitScaleMasks[scale])(k,j) = (itsInitScaleMasks[scale])(k,j) > itsMaskThreshold ? 1.0 : 0.0;
1199 : }
1200 : }
1201 0 : Float mysum = sum(itsInitScaleMasks[scale]);
1202 0 : if (mysum <= 0.1) {
1203 0 : os << LogIO::WARN << "Ignoring initial scale " << itsInitScaleSizes[scale] <<
1204 0 : " since it is too large to fit within the mask" << LogIO::POST;
1205 : }
1206 :
1207 : }
1208 :
1209 0 : Int nx = itsInitScaleMasks[0].shape()(0);
1210 0 : Int ny = itsInitScaleMasks[0].shape()(1);
1211 :
1212 : /* Set the edges of the masks according to the scale size */
1213 : // Set the values OUTSIDE the box to zero....
1214 0 : for(Int scale=0; scale < itsNInitScales; scale++)
1215 : {
1216 0 : Int border = (Int)(itsInitScaleSizes[scale]*1.5);
1217 : // bottom
1218 0 : IPosition blc1(2, 0 , 0 );
1219 0 : IPosition trc1(2, nx-1, border);
1220 0 : IPosition inc1(2, 1);
1221 0 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1222 0 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1223 : // top
1224 0 : blc1[0]=0; blc1[1] = ny-border-1;
1225 0 : trc1[0] = nx-1; trc1[1] = ny-1;
1226 0 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1227 0 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1228 : // left
1229 0 : blc1[0]=0; blc1[1]=border;
1230 0 : trc1[0]=border; trc1[1] = ny-border-1;
1231 0 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1232 0 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1233 : // right
1234 0 : blc1[0] = nx-border-1; blc1[1]=border;
1235 0 : trc1[0] = nx; trc1[1] = ny-border-1;
1236 0 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1237 0 : (itsInitScaleMasks[scale])(blc1,trc1) = 0.0;
1238 : }
1239 :
1240 0 : return true;
1241 : }
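// Illustrative example (hypothetical numbers): for an initial scale of 10 pixels,
// border = 15, so a 15-pixel-wide frame around the edge of that scale's mask is
// zeroed, which effectively keeps components of that scale from being placed too
// close to the image edge.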
1242 :
1243 0 : void AspMatrixCleaner::maxDirtyConvInitScales(float& strengthOptimum, int& optimumScale, IPosition& positionOptimum)
1244 : {
1245 0 : LogIO os(LogOrigin("AspMatrixCleaner", "maxDirtyConvInitScales()", WHERE));
1246 :
1247 : // /* We still need the following to define a region. Using minMaxMasked itself is NOT sufficient and results in components outside of mask.
1248 :
1249 0 : const int nx = itsDirty->shape()[0];
1250 0 : const int ny = itsDirty->shape()[1];
1251 :
1252 0 : IPosition blcDirty(itsDirty->shape().nelements(), 0);
1253 0 : IPosition trcDirty(itsDirty->shape() - 1);
1254 :
1255 0 : if(!itsMask.null())
1256 : {
1257 0 : os << LogIO::NORMAL3 << "Finding initial scales for Asp using given mask" << LogIO::POST;
1258 0 : if (itsMaskThreshold < 0)
1259 : {
1260 : os << LogIO::NORMAL3
1261 : << "Mask thresholding is not used, values are interpreted as weights"
1262 0 : <<LogIO::POST;
1263 : }
1264 : else
1265 : {
1266 : // a mask that does not allow for clean was sent
1267 0 : if(noClean_p)
1268 0 : return;
1269 :
1270 : os << LogIO::NORMAL3
1271 0 : << "Finding initial scales with mask values above " << itsMaskThreshold
1272 0 : << LogIO::POST;
1273 : }
1274 :
1275 0 : AlwaysAssert(itsMask->shape()(0) == nx, AipsError);
1276 0 : AlwaysAssert(itsMask->shape()(1) == ny, AipsError);
1277 0 : Int xbeg=nx-1;
1278 0 : Int ybeg=ny-1;
1279 0 : Int xend=0;
1280 0 : Int yend=0;
1281 0 : for (Int iy=0;iy<ny;iy++)
1282 : {
1283 0 : for (Int ix=0;ix<nx;ix++)
1284 : {
1285 0 : if((*itsMask)(ix,iy)>0.000001)
1286 : {
1287 0 : xbeg=min(xbeg,ix);
1288 0 : ybeg=min(ybeg,iy);
1289 0 : xend=max(xend,ix);
1290 0 : yend=max(yend,iy);
1291 : }
1292 : }
1293 : }
1294 0 : blcDirty(0)=xbeg;
1295 0 : blcDirty(1)=ybeg;
1296 0 : trcDirty(0)=xend;
1297 0 : trcDirty(1)=yend;
1298 : }
1299 : else
1300 0 : os << LogIO::NORMAL3 << "Finding initial scales using the entire image" << LogIO::POST; //*/
1301 :
1302 :
1303 0 : Vector<Float> maxima(itsNInitScales);
1304 0 : Block<IPosition> posMaximum(itsNInitScales);
1305 :
1306 : /*int nth = itsNInitScales;
1307 : #ifdef _OPENMP
1308 : nth = min(nth, omp_get_max_threads());
1309 : #endif*/
1310 :
1311 : //#pragma omp parallel default(shared) private(scale) num_threads(nth)
1312 : //{
1313 : // #pragma omp for // genie pragma seems to sometimes return wrong value to maxima on tesla
1314 :
1315 : /* debug info
1316 : Float maxVal=0;
1317 : IPosition posmin((*itsDirty).shape().nelements(), 0);
1318 : Float minVal=0;
1319 : IPosition posmax((*itsDirty).shape().nelements(), 0);
1320 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
1321 : cout << "orig itsDirty : min " << minVal << " max " << maxVal << endl;
1322 : cout << "posmin " << posmin << " posmax " << posmax << endl; */
1323 :
1324 0 : IPosition gip;
1325 0 : gip = IPosition(2, nx, ny);
1326 0 : Block<casacore::Matrix<Float>> vecWork_p;
1327 0 : vecWork_p.resize(itsNInitScales);
1328 :
1329 0 : for (int scale = 0; scale < itsNInitScales; ++scale)
1330 : {
1331 : // Find absolute maximum of each smoothed residual
1332 0 : vecWork_p[scale].resize(gip);
1333 0 : Matrix<Float> work = (vecWork_p[scale])(blcDirty,trcDirty);
1334 0 : work = 0.0;
1335 0 : work = work + (itsDirtyConvInitScales[scale])(blcDirty,trcDirty);
1336 :
1337 0 : maxima(scale) = 0;
1338 0 : posMaximum[scale] = IPosition(itsDirty->shape().nelements(), 0);
1339 :
1340 : /* debug info
1341 : Float maxVal=0;
1342 : Float minVal=0;
1343 : IPosition posmin(itsDirty->shape().nelements(), 0);
1344 : IPosition posmax(itsDirty->shape().nelements(), 0);
1345 : minMaxMasked(minVal, maxVal, posmin, posmax, itsDirtyConvInitScales[scale], itsInitScaleMasks[scale]);
1346 : cout << "DirtyConvInitScale " << scale << ": min " << minVal << " max " << maxVal << endl;
1347 : cout << "posmin " << posmin << " posmax " << posmax << endl; */
1348 :
1349 : // Note, must find peak from the (blcDirty, trcDirty) subregion to ensure components are within mask
1350 0 : if (!itsMask.null())
1351 : {
1352 0 : findMaxAbsMask(vecWork_p[scale], itsInitScaleMasks[scale],
1353 0 : maxima(scale), posMaximum[scale]);
1354 : }
1355 : else
1356 0 : findMaxAbs(vecWork_p[scale], maxima(scale), posMaximum[scale]);
1357 :
1358 0 : if (itsNormMethod == 2)
1359 : {
1360 0 : if (scale > 0)
1361 : {
1362 : float normalization;
1363 : //normalization = 2 * M_PI / pow(itsInitScaleSizes[scale], 2); //sanjay's
1364 : //normalization = 2 * M_PI / pow(itsInitScaleSizes[scale], 1); // this looks good on M31 but bad on G55
1365 0 : normalization = sqrt(2 * M_PI *itsInitScaleSizes[scale]); //GSL. Need to recover and re-norm later
1366 0 : maxima(scale) /= normalization;
1367 : } //sanjay's code doesn't normalize peak here.
1368 : // Norm Method 2 may work fine with GSL with derivatives, but Norm Method 1 is still the preferred approach.
1369 : }
1370 : }
1371 : //}//End parallel section
1372 :
1373 : // Find the peak residual among the initial scales, which will be the next Aspen
1374 0 : for (int scale = 0; scale < itsNInitScales; scale++)
1375 : {
1376 0 : if(abs(maxima(scale)) > abs(strengthOptimum))
1377 : {
1378 0 : optimumScale = scale;
1379 0 : strengthOptimum = maxima(scale);
1380 0 : positionOptimum = posMaximum[scale];
1381 : }
1382 : }
1383 :
1384 0 : if (optimumScale > 0)
1385 : {
1386 : //const float normalization = 2 * M_PI / (pow(1.0/itsPsfWidth, 2) + pow(1.0/itsInitScaleSizes[optimumScale], 2)); // sanjay
1387 0 : const float normalization = sqrt(2 * M_PI / (pow(1.0/itsPsfWidth, 2) + pow(1.0/itsInitScaleSizes[optimumScale], 2))); // this is good.
1388 :
1389 : // norm method 2 recovers the optimal strength and then normalize it to get the init guess
1390 0 : if (itsNormMethod == 2)
1391 0 : strengthOptimum *= sqrt(2 * M_PI *itsInitScaleSizes[optimumScale]); // this is needed if we also first normalize and then compare.
1392 :
1393 0 : strengthOptimum /= normalization;
1394 : // cout << "normalization " << normalization << " strengthOptimum " << strengthOptimum << endl;
1395 : }
1396 :
1397 0 : AlwaysAssert(optimumScale < itsNInitScales, AipsError);
1398 : }
1399 :
1400 : // ALGLIB
1401 0 : vector<Float> AspMatrixCleaner::getActiveSetAspen()
1402 : {
1403 0 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1404 :
1405 0 : if(int(itsInitScaleXfrs.nelements()) == 0)
1406 0 : throw(AipsError("Initial scales for Asp are not defined"));
1407 :
1408 0 : if (!itsSwitchedToHogbom &&
1409 0 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1410 : {
1411 0 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1412 0 : switchedToHogbom();
1413 : }
1414 :
1415 0 : if (itsSwitchedToHogbom)
1416 0 : itsNInitScales = 1;
1417 : else
1418 0 : itsNInitScales = itsInitScaleSizes.size();
1419 :
1420 : // Dirty * initial scales
1421 0 : Matrix<Complex> dirtyFT;
1422 0 : fft.fft0(dirtyFT, *itsDirty);
1423 0 : itsDirtyConvInitScales.resize(0);
1424 0 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
1425 : //cout << "itsInitScaleSizes.size() " << itsInitScaleSizes.size() << " itsInitScales.size() " << itsInitScales.size() << " NInitScales # " << itsNInitScales << endl;
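     : // Convolve the dirty image with each initial scale in the Fourier domain: the dirty
     : // image is transformed once (dirtyFT above), multiplied by each scale's transfer
     : // function itsInitScaleXfrs[scale], and transformed back.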
1426 0 : for (int scale=0; scale < itsNInitScales; scale++)
1427 : {
1428 0 : Matrix<Complex> cWork;
1429 :
1430 0 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1431 0 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1432 0 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1433 0 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1434 :
1435 : //cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1436 : //cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1437 : }
1438 :
1439 0 : float strengthOptimum = 0.0;
1440 0 : int optimumScale = 0;
1441 0 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1442 0 : itsGoodAspActiveSet.resize(0);
1443 0 : itsGoodAspAmplitude.resize(0);
1444 0 : itsGoodAspCenter.resize(0);
1445 :
1446 0 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
1447 :
1448 0 : os << LogIO::NORMAL3 << "Peak among the smoothed residual images is " << strengthOptimum << ", found at initial scale " << optimumScale << LogIO::POST;
1449 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
1450 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
1451 :
1452 :
1453 0 : itsStrengthOptimum = strengthOptimum;
1454 0 : itsPositionOptimum = positionOptimum;
1455 0 : itsOptimumScale = optimumScale;
1456 0 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
1457 :
1458 : // an initial scale size of 0 is just the peak residual (a point source), so we
1459 : // don't need to run the LBFGS optimization for it
1460 0 : if (itsOptimumScale == 0)
1461 0 : return {};
1462 : else
1463 : {
1464 : // the new aspen is always added to the active-set
1465 0 : vector<Float> tempx;
1466 0 : vector<IPosition> activeSetCenter;
1467 :
1468 0 : tempx.push_back(strengthOptimum);
1469 0 : tempx.push_back(itsInitScaleSizes[optimumScale]);
1470 0 : activeSetCenter.push_back(positionOptimum);
1471 :
1472 : // initialize the ALGLIB optimization variables
1473 0 : unsigned int length = tempx.size();
1474 0 : real_1d_array x;
1475 0 : x.setlength(length);
1476 :
1477 : // initialize starting point
1478 0 : for (unsigned int i = 0; i < length; i+=2)
1479 : {
1480 0 : x[i] = tempx[i];
1481 0 : x[i+1] = tempx[i+1];
1482 : }
1483 :
1484 0 : ParamAlglibObj optParam(*itsDirty, *itsXfr, activeSetCenter, fft);
1485 : ParamAlglibObj *ptrParam;
1486 0 : ptrParam = &optParam;
1487 :
1488 0 : real_1d_array s = "[1,1]";
1489 0 : double epsg = 1e-3;
1490 0 : double epsf = 1e-3;
1491 0 : double epsx = 1e-3;
1492 0 : ae_int_t maxits = 5;
1493 0 : minlbfgsstate state;
1494 0 : minlbfgscreate(1, x, state);
1495 0 : minlbfgssetcond(state, epsg, epsf, epsx, maxits);
1496 0 : minlbfgssetscale(state, s);
1497 0 : minlbfgsreport rep;
1498 0 : alglib::minlbfgsoptimize(state, objfunc_alglib, NULL, (void *) ptrParam);
1499 0 : minlbfgsresults(state, x, rep);
1500 : //double *x1 = x.getcontent();
1501 : //cout << "x1[0] " << x1[0] << " x1[1] " << x1[1] << endl;
1502 :
1503 : // end alglib bfgs optimization
1504 :
1505 0 : double amp = x[0]; // optimized Aspen amplitude (x[i] in the general multi-Aspen layout)
1506 0 : double scale = x[1]; // optimized Aspen scale size (x[i+1])
1507 :
1508 0 : if (fabs(scale) < 0.4)
1509 : {
1510 0 : scale = 0;
1511 0 : amp = (*itsDirty)(itsPositionOptimum); // This is to avoid divergence due to amp being too large.
1512 : // amp=strengthOptimum gives similar results
1513 : }
1514 : else
1515 0 : scale = fabs(scale);
1516 :
1517 0 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
1518 0 : itsGoodAspActiveSet.push_back(scale); // active-set
1519 :
1520 0 : itsStrengthOptimum = amp;
1521 0 : itsOptimumScaleSize = scale;
1522 0 : itsGoodAspCenter = activeSetCenter;
1523 :
1524 : // debug
1525 0 : os << LogIO::NORMAL3 << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
1526 : //cout << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << endl;
1527 :
1528 : } // finish bfgs optimization
1529 :
1530 0 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
1531 0 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
1532 :
1533 : // debug info
1534 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
1535 : {
1536 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
1537 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
1538 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
1539 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
1540 : }*/
1541 :
1542 0 : return itsGoodAspActiveSet; // return optimized scale
1543 : }
1544 :
1545 :
1546 : // gsl lbfgs
1547 : /*
1548 : vector<Float> AspMatrixCleaner::getActiveSetAspen()
1549 : {
1550 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1551 :
1552 : if(int(itsInitScaleXfrs.nelements()) == 0)
1553 : throw(AipsError("Initial scales for Asp are not defined"));
1554 :
1555 : if (!itsSwitchedToHogbom &&
1556 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1557 : {
1558 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1559 : switchedToHogbom();
1560 : }
1561 :
1562 : if (itsSwitchedToHogbom)
1563 : itsNInitScales = 1;
1564 : else
1565 : itsNInitScales = itsInitScaleSizes.size();
1566 :
1567 : // Dirty * initial scales
1568 : Matrix<Complex> dirtyFT;
1569 : FFTServer<Float,Complex> fft(itsDirty->shape());
1570 : fft.fft0(dirtyFT, *itsDirty);
1571 : itsDirtyConvInitScales.resize(0);
1572 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
1573 :
1574 : for (int scale=0; scale < itsNInitScales; scale++)
1575 : {
1576 : Matrix<Complex> cWork;
1577 :
1578 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1579 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1580 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1581 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1582 :
1583 : // cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1584 : // cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1585 : }
1586 :
1587 : float strengthOptimum = 0.0;
1588 : int optimumScale = 0;
1589 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1590 : itsGoodAspActiveSet.resize(0);
1591 : itsGoodAspAmplitude.resize(0);
1592 : itsGoodAspCenter.resize(0);
1593 : //itsPrevAspActiveSet.resize(0);
1594 : //itsPrevAspAmplitude.resize(0);
1595 :
1596 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
1597 : os << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
1598 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
1599 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
1600 :
1601 : // memory used
1602 : //itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024);
1603 : //cout << "Memory allocated in getActiveSetAspen " << itsUsedMemoryMB << " MB." << endl;
1604 :
1605 : itsStrengthOptimum = strengthOptimum;
1606 : itsPositionOptimum = positionOptimum;
1607 : itsOptimumScale = optimumScale;
1608 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
1609 :
1610 : // initial scale size = 0 gives the peak res, so we don't
1611 : // need to do the LBFGS optimization for it
1612 : if (itsOptimumScale == 0)
1613 : return {};
1614 : else
1615 : {
1616 : // AlwaysAssert(itsAspScaleSizes.size() == itsAspAmplitude.size(), AipsError);
1617 : // AlwaysAssert(itsAspScaleSizes.size() == itsAspCenter.size(), AipsError);
1618 :
1619 : // No longer needed. heuristically determine active set for speed up
1620 : /*Float resArea = 0.0;
1621 : Int nX = itsDirty->shape()(0);
1622 : Int nY = itsDirty->shape()(1);
1623 :
1624 : for (Int j = 0; j < nY; ++j)
1625 : {
1626 : for(Int i = 0; i < nX; ++i)
1627 : resArea += abs((*itsDirty)(i,j));
1628 : }
1629 : const Float lamda = 318; //M31 new Asp - gold
1630 :
1631 : const Float threshold = lamda * resArea;
1632 :
1633 : vector<Float> tempx;
1634 : vector<IPosition> activeSetCenter;
1635 :
1636 : vector<pair<Float,int>> vp; //(LenDev, idx)
1637 :
1638 : sort(vp.begin(),vp.end(), [](const pair<Float,int> &l, const pair<Float,int> &r) {return l.first > r.first;});
1639 :
1640 : // select the top 5
1641 : vector<int> goodvp;
1642 : for (unsigned int i = 0; i < vp.size(); i++)
1643 : {
1644 : if (i >= 20)
1645 : break;
1646 : goodvp.push_back(vp[i].second);
1647 : }
1648 : sort(goodvp.begin(), goodvp.end(), [](const int &l, const int &r) {return l > r;});
1649 :
1650 : for (unsigned int i = 0; i < goodvp.size(); i++)
1651 : {
1652 : tempx.push_back(itsAspAmplitude[goodvp[i]]);
1653 : tempx.push_back(itsAspScaleSizes[goodvp[i]]);
1654 : activeSetCenter.push_back(itsAspCenter[goodvp[i]]);
1655 : itsAspAmplitude.erase(itsAspAmplitude.begin() + goodvp[i]);
1656 : itsAspScaleSizes.erase(itsAspScaleSizes.begin() + goodvp[i]);
1657 : itsAspCenter.erase(itsAspCenter.begin() + goodvp[i]);
1658 : itsAspGood.erase(itsAspGood.begin() + goodvp[i]);
1659 : }* /
1660 :
1661 : // the new aspen is always added to the active-set
1662 : vector<Float> tempx;
1663 : vector<IPosition> activeSetCenter;
1664 :
1665 : tempx.push_back(strengthOptimum);
1666 : tempx.push_back(itsInitScaleSizes[optimumScale]);
1667 : activeSetCenter.push_back(positionOptimum);
1668 :
1669 : // GSL: set the initial guess
1670 : unsigned int length = tempx.size();
1671 : gsl_vector *x = NULL;
1672 : x = gsl_vector_alloc(length);
1673 : gsl_vector_set_zero(x);
1674 :
1675 : for (unsigned int i = 0; i < length; i+=2)
1676 : {
1677 : gsl_vector_set(x, i, tempx[i]);
1678 : gsl_vector_set(x, i+1, tempx[i+1]);
1679 :
1680 : // No longer needed. save aspen before optimization
1681 : // itsPrevAspAmplitude.push_back(tempx[i]); // active-set amplitude before bfgs
1682 : // itsPrevAspActiveSet.push_back(tempx[i+1]); // prev active-set before bfgs
1683 : }
1684 :
1685 : // GSL optimization
1686 : //fdf
1687 : gsl_multimin_function_fdf my_func;
1688 : gsl_multimin_fdfminimizer *s = NULL;
1689 :
1690 : // setupSolver
1691 : ParamObj optParam(*itsDirty, *itsXfr, activeSetCenter);
1692 : ParamObj *ptrParam;
1693 : ptrParam = &optParam;
1694 : // fdf
1695 : const gsl_multimin_fdfminimizer_type *T;
1696 : T = gsl_multimin_fdfminimizer_vector_bfgs2;
1697 : s = gsl_multimin_fdfminimizer_alloc(T, length);
1698 : my_func.n = length;
1699 : my_func.f = my_f;
1700 : my_func.df = my_df;
1701 : my_func.fdf = my_fdf;
1702 : my_func.params = (void *)ptrParam;
1703 :
1704 : // fdf
1705 : const float InitStep = gsl_blas_dnrm2(x);
1706 : gsl_multimin_fdfminimizer_set(s, &my_func, x, InitStep, 0.1);
1707 :
1708 : // ---------- BFGS algorithm begin ----------
1709 : // fdf
1710 : findComponent(5, s); // has to be > =5
1711 : //---------- BFGS algorithm end ----------
1712 :
1713 : // update x is needed here.
1714 : gsl_vector *optx = NULL;
1715 : optx = gsl_multimin_fdfminimizer_x(s); //fdf
1716 : // end GSL optimization
1717 :
1718 : // put the updated latest Aspen back to the active-set. Permanent list is no longer needed.
1719 : //for (unsigned int i = 0; i < length; i+= 2)
1720 : //{
1721 : double amp = gsl_vector_get(optx, 0); // i
1722 : double scale = gsl_vector_get(optx, 1); // i+1
1723 : scale = (scale = fabs(scale)) < 0.4 ? 0 : scale;
1724 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
1725 : itsGoodAspActiveSet.push_back(scale); // active-set
1726 :
1727 : // No longer needed. permanent list that doesn't get clear
1728 : //itsAspAmplitude.push_back(amp);
1729 : //itsAspScaleSizes.push_back(scale);
1730 : //itsAspCenter.push_back(activeSetCenter[i/2]);
1731 : //}
1732 :
1733 : //itsStrengthOptimum = itsAspAmplitude[itsAspAmplitude.size() -1]; // the latest aspen is the last element of x
1734 : //itsOptimumScaleSize = itsAspScaleSizes[itsAspScaleSizes.size() -1]; // the latest aspen is the last element of x
1735 : itsStrengthOptimum = amp;
1736 : itsOptimumScaleSize = scale;
1737 : itsGoodAspCenter = activeSetCenter;
1738 :
1739 : // debug
1740 : //os << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
1741 :
1742 : // free GSL stuff
1743 : gsl_multimin_fdfminimizer_free(s); //fdf
1744 : gsl_vector_free(x);
1745 : //gsl_vector_free(optx); // Don't free optx: it is owned by the minimizer and freeing it causes a seg fault
1746 :
1747 : } // finish bfgs optimization
1748 :
1749 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
1750 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
1751 :
1752 : // debug info
1753 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
1754 : {
1755 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
1756 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
1757 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
1758 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
1759 : }* /
1760 :
1761 : return itsGoodAspActiveSet; // return optimized scale
1762 : }*/
1763 :
1764 : // Define the Asp scales without doing anything else
1765 0 : void AspMatrixCleaner::defineAspScales(vector<Float>& scaleSizes)
1766 : {
1767 0 : sort(scaleSizes.begin(), scaleSizes.end());
1768 : // No longer needed since we only update with the latest Aspen: remove the duplicated scales
1769 : // scaleSizes.erase(unique(scaleSizes.begin(),scaleSizes.end(),[](Float l, Float r) { return abs(l - r) < 1e-3; }), scaleSizes.end());
1770 :
1771 0 : itsNscales = Int(scaleSizes.size());
1772 0 : itsScaleSizes.resize(itsNscales);
1773 0 : itsScaleSizes = Vector<Float>(scaleSizes); // make a copy that we can call our own
1774 :
1775 : // analytically deconvolve the PSF width from each requested scale (Asp 2016)
1776 0 : if (itsUseZhang)
1777 : {
1778 0 : for (Int i = 0; i < itsNscales; i++)
1779 : {
1780 0 : if (itsScaleSizes[i] >= itsPsfWidth)
1781 0 : itsScaleSizes[i] = sqrt(pow(itsScaleSizes[i], 2) - pow(Float(itsPsfWidth), 2));
1782 : }
1783 : }
1784 : // end Asp 2016
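     : // For example (illustrative numbers): with itsPsfWidth = 3 pixels, a requested scale
     : // of 5 pixels is stored as sqrt(5^2 - 3^2) = 4 pixels, i.e. the intrinsic component
     : // width once the PSF contribution is removed in quadrature.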
1785 :
1786 0 : itsScalesValid = true;
1787 0 : }
1788 :
1789 0 : void AspMatrixCleaner::switchedToHogbom(bool runlong)
1790 : {
1791 0 : LogIO os(LogOrigin("AspMatrixCleaner", "switchedToHogbom", WHERE));
1792 :
1793 0 : itsSwitchedToHogbom = true;
1794 0 : itsNthHogbom += 1;
1795 0 : itsNumIterNoGoodAspen.resize(0);
1796 : //itsNumHogbomIter = ceil(100 + 50 * (exp(0.05*itsNthHogbom) - 1)); // zhang's formula
1797 : //itsNumHogbomIter = ceil(50 + 2 * (exp(0.05*itsNthHogbom) - 1)); // genie's formula
1798 0 : itsNumHogbomIter = 51; // genie's formula with itsNthHogbom dropped to remove the state dependency; the difference from the above is negligible
1799 :
1800 0 : if (runlong)
1801 : //itsNumHogbomIter = ceil(500 + 50 * (exp(0.05*itsNthHogbom) - 1));
1802 0 : itsNumHogbomIter = 510;
1803 :
1804 0 : os << LogIO::NORMAL3 << "Run hogbom for " << itsNumHogbomIter << " iterations." << LogIO::POST;
1805 0 : }
1806 :
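     : // setOrigDirty stores a deep copy of the caller's dirty image (casacore's assign
     : // copies the pixel data rather than referencing it), presumably so later updates to
     : // the working residual leave this original untouched.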
1807 0 : void AspMatrixCleaner::setOrigDirty(const Matrix<Float>& dirty){
1808 0 : itsOrigDirty=new Matrix<Float>(dirty.shape());
1809 0 : itsOrigDirty->assign(dirty);
1810 0 : }
1811 :
1812 :
1813 0 : void AspMatrixCleaner::getFluxByBins(const vector<Float>& scaleSizes,const vector<Float>& optimum, Int binSize, vector<Float>& sumFluxByBins,vector<Float>& rangeFluxByBins) {
1814 :
1815 0 : double maxScaleSize = *std::max_element(scaleSizes.begin(),scaleSizes.end());
1816 0 : double minScaleSize = *std::min_element(scaleSizes.begin(),scaleSizes.end());
1817 0 : double deltaScale = (maxScaleSize-minScaleSize) / binSize;
1818 :
1819 :
1820 0 : for(Int j=0; j < binSize+1; j++)
1821 : {
1822 0 : rangeFluxByBins[j] = minScaleSize+j*deltaScale;
1823 0 : if ( j == binSize)
1824 0 : rangeFluxByBins[j] = maxScaleSize;
1825 : }
1826 :
1827 0 : for(Int i=0; i < Int(scaleSizes.size()); i++)
1828 0 : for(Int j=0; j < binSize; j++) // only binSize bins; index j+1 then stays within rangeFluxByBins
1829 : {
     : // the last bin is closed on its upper edge so the maximum scale size is counted
1830 0 : if ( scaleSizes[i] >= rangeFluxByBins[j] &&
     :      (scaleSizes[i] < rangeFluxByBins[j+1] || (j == binSize-1 && scaleSizes[i] <= rangeFluxByBins[j+1])) )
1831 0 : sumFluxByBins[j] += optimum[i];
1832 : }
1833 :
1834 :
1835 0 : }
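     : // Usage sketch (illustrative names; the caller is assumed to pre-size the outputs):
     : //   const Int nbins = 10;
     : //   std::vector<Float> sumFlux(nbins + 1, 0.0f), edges(nbins + 1, 0.0f);
     : //   aspCleaner.getFluxByBins(scaleSizes, amplitudes, nbins, sumFlux, edges);
     : // edges[j] and edges[j+1] bound bin j; sumFlux[j] accumulates the amplitudes of the
     : // components whose scale size falls in that bin.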
1836 :
1837 :
1838 :
1839 : } //# NAMESPACE CASA - END