# -*- coding: utf-8 -*- ################################################################################ #### License and Permissions #### ################################################################################ # Herschel/SPIRE Spectral Feature Finder: A program to extract significant # spectral features from SPIRE spectral data. # Copyright (C) 2020 (authors listed below). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or any # later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see https://www.gnu.org/licenses/gpl-3.0.html # # Acknowledging or Citing the Feature Finder Software or Products: # If you plan to use this notebook for research or work publications we # ask that you please cite the following Feature Finder Paper: # # * R Hopwood, I Valtchanov, L D Spencer, J Scott, C Benson, N Marchili, # N Hładczuk, E T Polehampton, N Lu, G Makiwa, D A Naylor, B G Gom, G Noble, # M J Griffin, The Herschel SPIRE Fourier Transform Spectrometer Spectral # Feature Finder I. The Spectral Feature Finder and Catalogue, Monthly # Notices of the Royal Astronomical Society, Volume 496, Issue 4, August # 2020, Pages 4874–4893, https://doi.org/10.1093/mnras/staa1612 # # * and provide link and refer to DOI https://doi.org/10.5270/esa-lysf2yi ################################################################################ #### FTS Spectral Feature Finder functions 9/4/20 #### ################################################################################ ######## Functions ####### # 7. Plotting # 8. 
#        Products I/O
# 9.     GoF/LineEvidence
# 10.    Redshift
# 11.    Neutral Carbon Check
# 12.    Feature finding
################################################################################
####                        6. Import statements                            ####
################################################################################
# NOTE(review): this script runs inside Herschel HIPE (Jython 2.x).  The
# herschel.* packages and the global numeric names used below (Double1d,
# MEAN, Constant, ...) are injected by the HIPE runtime, not imported here.
from java.lang.Math import PI
from herschel.share.unit import *
from java.awt import Color
from herschel.ia.gui.plot.renderer import FillClosureType
from herschel.ia.gui.plot.renderer import PComponentEngine
from herschel.ia.gui.plot.renderer import ChartFitMode
import re
# Speed of light in km/s (Constant.SPEED_OF_LIGHT is in m/s)
c = Constant.SPEED_OF_LIGHT.value/1000.0
# Rest frequencies [GHz] of the 12CO rotational ladder covered by the
# SPIRE FTS bands (J=4-3 up to J=13-12)
COLadder = Double1d([461.0407682,576.2679305,691.4730763,806.6518060,921.7997000,\
                     1036.9123930,1151.9854520,1267.0144860,1381.9951050,1496.9229090])
#Characteristic Difference Array for CO Ladder: integer multiples of the mean
#spacing between adjacent 12CO lines, used by the redshift matching routines
CDA = MEAN(COLadder[1:] - COLadder[:-1])*Double1d(range(10))
################################################################################
####                            7. Plotting                                 ####
################################################################################
###Plotting colours###
teal = Color(0.2,0.4,1.0)
lightTeal = Color(179,226,205)
orange = Color.ORANGE
lightOrange = Color(253,205,172)
red = Color.RED
blue = Color.BLUE
darkRed = Color(0.55,0.0,0.0)
darkBlue = Color(0.0,0.0,0.55)
cyan = Color.CYAN
green = Color.GREEN
darkGreen = Color(0.0,0.6,0.0)
purple = Color(0.8,0.0,1.0)
black = Color.BLACK
lightGray = Color.lightGray
tickBlue = Color(0.0,0.7,1.0)
###Postcard fill colours for the avoid regions (RGBA, semi-transparent)
fillColourSlw = Color(0.5,0.0,0.0,0.2)
fillColourSsw = Color(0.0,0.0,0.5,0.2)
###Colours for plotting features found, which are linked to the snrThresholds.
#Doubled up so the list cannot run out if there are more threshold iterations
#than distinct colours.
threshColours = [black,lightOrange,darkGreen,purple,cyan,teal]
threshColours+=threshColours
#snrThresholds = [100.0, 50.0, 30.0, 10.0, 5.0, 3.0]
#############################################################################
####                        Plotting functions                           ####
#############################################################################
###MAIN DIAGNOSTIC PLOTS
def diagnosticPlots(specIn, conIn, modelIn, features, indexFeatures, snrs, thresholds, plotCol, \
                    sincModels, obsid, name, det_i,\
                    zoomPlots=False,saveMainPlots=False,closePlots=True,plotPath='',\
                    zoomPlotPath=None,checkCutResults=False, snrCut=5.0,\
                    seePlots=True, res='HR'):
    '''
    Plots the continuum subtracted spectrum, the total model and the residual
    and marks the features found, coloured by < snrCut and snrThreshold
    Input:
        - specIn: spectrum to plot
        - conIn: continuum to subtract (array)
        - modelIn: total fitted model (array)
        - features: frequencies of features found
        - indexFeatures: feature names
        - snrs: SNRs corresponding to features
                or if checkCutResults these should be snrsIter
        - thresholds: snrThresholds where features were found
        - plotCol: colours linked to the snrThresholds
        - sincModels: fitted sincs
        - obsid: observation ID
        - name: name to use for plot title
        - det_i: detector index, to check if it is the first detector in the list
        - zoomPlots: set to True for zoomPlots
        - saveMainPlots: set to True to save the main plots
        - closePlots: closes the main plots if saveMainPlots==True.
                      Set to False to stop closure
        - plotPath: path where the main plots will be saved if saveMainPlots == True
        - zoomPlotPath: path where the zoom plots will be saved (zoomPlots are always saved)
        - checkCutResults: set to a snrThreshold to obtain the plots at that cut
        - snrCut: minimum SNR for features
        - seePlots: set to False for quicker execution (automatic if closePlots is True)
        - res: for the zoom plot x-range
    Output:
        - plot1: the diagnostic plot
    '''
    # NOTE(review): this function reads the globals 'snrsIter' (per-iteration
    # SNRs) and 'det' (current detector name) from the calling script rather
    # than taking them as arguments -- TODO confirm they are always defined.
    #Create the plot
    plot1 = PlotXY(seePlots)
    #Mark features < snrCut as dashed grey
    #Mark features > snrCut using colours linked to the snrThresholds
    #See the colour reference plot (black, lightOrange, darkGreen, purple, cyan, teal)
    #inLegLow/inLegHigh ensure each legend entry appears only once
    inLegLow = 1
    inLegHigh = 1
    for ii in range(len(features)):
        feature = features[ii]
        if checkCutResults:
            snr = snrsIter[ii][0]
        else:
            snr = snrs[ii]
        threshold = thresholds[ii]
        col = plotCol[threshold]
        if ABS(snr) < snrCut:
            #Features that didn't make the cut: vertical dashed grey line
            plot1.addLayer(LayerXY(Double1d([feature,feature]),\
                Double1d([MIN(specIn.flux-conIn)/3.-3,MAX(specIn.flux-conIn)+1]),\
                color=Color.LIGHT_GRAY,stroke=1.25,\
                line=Style.DASHED,name = 'Features < snrCut',inLegend=inLegLow))
            inLegLow=0
        else:
            #Features included in the output catalogue, coloured by iteration
            plot1.addLayer(LayerXY(Double1d([feature,feature]),\
                Double1d([MIN(specIn.flux-conIn)/3.-3,MAX(specIn.flux-conIn)+1]),\
                color=col,stroke=1.25,\
                name = 'Features >= snrCut',inLegend=inLegHigh))
            inLegHigh = 0
    #plot1.addLayer(LayerXY(specIn.wave,finalResidualArray,name='Residual',color=Color.GREEN))
    #Continuum-subtracted data and total fitted model
    plot1.addLayer(LayerXY(specIn.wave,specIn.flux-conIn,name='Spectrum',color=Color.BLUE))
    plot1.addLayer(LayerXY(specIn.wave,modelIn-conIn,name='Fitted model',color=Color.RED))
    ######
    # zoom in on each feature
    ######
    if zoomPlots:
        for go in range(len(features)):
            #Evaluate the fitted sinc of this feature on the frequency grid
            modPara = sincModels[go].getFittedParameters()
            sinc = SincModel()
            sinc.setParameters(modPara)
            sinc = sinc(specIn.wave)
            layTemp = LayerXY(specIn.wave,sinc,name='Fitted sinc',color=Color.BLACK,line=Style.DASHED,stroke=1.0)
            plot1.addLayer(layTemp)
            feature = features[go]
            featureInd = indexFeatures[go]
            if checkCutResults:
                snr = snrsIter[go][0]
            else:
                snr = snrs[go]
            snrIter = snrsIter[go]
            threshold = thresholds[go]
            #Zoom window: +/-15 GHz for HR, +/-45 GHz otherwise
            if res == 'HR':
                rng = [feature-15,feature+15]
            else:
                rng = [feature-45,feature+45]
            xr=[rng[0],rng[1]]
            sel = specIn.wave.where((specIn.wave > rng[0]) & (specIn.wave < rng[1]))
            yr=[MIN((specIn.flux-conIn)[sel])-0.1,MAX((specIn.flux-conIn)[sel])+0.2]
            plot1 = addAxis(plot1,xtitle='Frequency [GHz]',ytitle='%s [%s]'%(specIn.fluxDescription,specIn.fluxUnit.dialogName),\
                xrange=xr,yrange=yr,\
                width=0,height=0,legend=1,legendFS=12,title='%s %s %s'%(featureInd, name,det))
            #Annotate the zoom plot with the feature frequency, SNRs and threshold
            plot1.addAnnotation(Annotation(xr[0]+1.5,yr[1]-0.2*yr[1],'Feature (GHz):',fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+1.5,yr[1]-0.3*yr[1],'SNR:',fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+1.5,yr[1]-0.4*yr[1],'SNR iter:',fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+1.5,yr[1]-0.5*yr[1],'Threshold:',fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+7,yr[1]-0.2*yr[1],'%0.2f'%(feature),fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+7,yr[1]-0.3*yr[1],'%0.1f'%(snr),fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+7,yr[1]-0.4*yr[1],'%0.1f'%(snrIter[0]),fontSize=10))
            plot1.addAnnotation(Annotation(xr[0]+7,yr[1]-0.5*yr[1],'%0.1f'%(threshold),fontSize=10))
            #Zoom plots are always saved
            plot1.saveAsPDF('%s%i_%s_%0.1f_%s.pdf'%(zoomPlotPath,obsid,specIn.meta['channelName'].value,feature,featureInd))
            #Remove the per-feature layer/annotations before the next zoom
            plot1.removeLayer(layTemp)
            plot1.clearAnnotations()
    #rescale plot back to the full band
    plot1 = addAxis(plot1,xtitle='Frequency [GHz]',ytitle='%s [%s]'%(specIn.fluxDescription,specIn.fluxUnit.dialogName),\
        xrange=[MIN(specIn.wave),MAX(specIn.wave)],yrange=[MIN(specIn.flux-conIn),MAX(specIn.flux-conIn)],\
        width=0,height=0,legend=1,legendFS=12,title='%s %s'%(name,det))
    if saveMainPlots:
        plot1.saveAsPDF('%s%i_%s.pdf'%(plotPath,obsid,det))
        if closePlots:
            plot1.close()
    return plot1

###POSTCARDS
def preparePostcardPlots(spec, avoid, seePostCards=False):
    '''
    Prepares the postcard plot per obsid
    Input:
        - spec: input spectrum
        - avoid: avoided region at the ends of the bands to plot shaded
        - seePostCards: set to False to hide the postcard plots
    Output:
        - plot2: plot to add fitted features and spectrum to
        - boxMinMax: used for setting the yrange
    '''
    plot2 = PlotXY(seePostCards)
    #Create shaded regions spanning the flux range of the two centre detectors
    boxMinMax = [MIN([MIN(spec[0]['SLWC3'].flux),MIN(spec[0]['SSWD4'].flux)]),\
                 MAX([MAX(spec[0]['SLWC3'].flux)*2,MAX(spec[0]['SSWD4'].flux)*2])]
    boxX = Double1d([0,0,avoid,avoid,0])
    boxY = Double1d([boxMinMax[0]-boxMinMax[1],boxMinMax[1],boxMinMax[1],boxMinMax[0]-boxMinMax[1],boxMinMax[0]-boxMinMax[1]])
    #Add avoid regions, coloured depending on the detector
    #(filled polygons with invisible outlines, at both band edges)
    layB1 = LayerXY(boxX+spec[0]['SLWC3'].wave[0],boxY,stroke=0,color=Color(0,0,0,0))
    layB1.style.fillClosureType=FillClosureType.SELF
    layB1.style.fillPaint=fillColourSlw
    layB1.style.fillEnabled=1
    layB2 = LayerXY(boxX+spec[0]['SLWC3'].wave[-1]-avoid,boxY,stroke=0,color=Color(0,0,0,0))
    layB2.style.fillClosureType=FillClosureType.SELF
    layB2.style.fillPaint=fillColourSlw
    layB2.style.fillEnabled=1
    layB3 = LayerXY(boxX+spec[0]['SSWD4'].wave[0],boxY,stroke=0,color=Color(0,0,0,0))
    layB3.style.fillClosureType=FillClosureType.SELF
    layB3.style.fillPaint=fillColourSsw
    layB3.style.fillEnabled=1
    layB4 = LayerXY(boxX+spec[0]['SSWD4'].wave[-1]-avoid,boxY,stroke=0,color=Color(0,0,0,0))
    layB4.style.fillClosureType=FillClosureType.SELF
    layB4.style.fillPaint=fillColourSsw
    layB4.style.fillEnabled=1
    plot2.addLayer(layB1)
    plot2.addLayer(layB2)
    plot2.addLayer(layB3)
    plot2.addLayer(layB4)
    return plot2, boxMinMax

def postcardPlots(plot2, spec, det, det_i, cutFeatures, cutSnrs, cutPeaks, \
                  cutContinuumValues, overlap, boxMinMax, withTicks = True):
    '''
    Adds the spectrum to and marks the found features > snrCut
    on the prepared postcard plot
    Input:
        - plot2: plot prepared by preparePostcardPlots
        - spec: the spectrum to plot
        - det: the detector to plot
        - det_i: to check if it is the first detector in the list
        - cutFeatures: frequencies of features found > snrCut
        - cutSnrs: SNRs corresponding to cutFeatures, for scaling the feature marks
        - cutPeaks: fitted peaks corresponding to cutFeatures
        - cutContinuumValues: continuum values corresponding to cutFeatures
        - overlap: overlap region between the bands
        - boxMinMax: output from preparePostcardPlots and used to define ymin
    '''
    # NOTE(review): reads the global 'obsid' for the plot title -- TODO confirm
    # it is defined by the calling script.
    lineThicks = Double1d(cutFeatures.size)+0.75
    if det[:3] == 'SLW':
        linCol = red
        specCol = darkRed
        #SLW features inside the band-overlap region get thicker tick marks
        posOverLap = cutFeatures.where(cutFeatures > overlap[0])
        lineThicks[posOverLap] = 1.5
    else:
        linCol = tickBlue
        specCol = darkBlue
    plot2.addLayer(LayerXY(spec[0][det].wave,spec[0][det].flux,color=specCol))
    ymin = boxMinMax[0]
    ##add the features
    if withTicks:
        for go in range(cutFeatures.size):
            feature = cutFeatures[go]
            snr = cutSnrs[go]
            lineThick = lineThicks[go]
            totalPeak = cutPeaks[go] + cutContinuumValues[go]
            subPeak = cutPeaks[go]
            if snr > 0:
                #Emission feature: tick above the peak, length scaled by LOG(snr)
                plot2.addLayer(LayerXY(Double1d([feature,feature]),Double1d([totalPeak+(boxMinMax[1]-boxMinMax[0])*0.05,totalPeak+(boxMinMax[1]-boxMinMax[0])*0.05*LOG(snr)]),\
                    color=linCol,stroke = lineThick))
                ymin = boxMinMax[0]
            else:
                #Absorption feature: tick below the peak; extend the yrange down
                ymin = boxMinMax[0]-(boxMinMax[1]-boxMinMax[0])*0.05
                plot2.addLayer(LayerXY(Double1d([feature,feature]),Double1d([totalPeak-(boxMinMax[1]-boxMinMax[0])*0.05*LOG(ABS(snr)),totalPeak-(boxMinMax[1]-boxMinMax[0])*0.05]),\
                    color=linCol,stroke = lineThick))
    if det_i == 0:
        #Only size/label the chart once, for the first detector
        plot2.setChartFitMode(ChartFitMode.FIT_CONTAINER_SIZE)
        plot2 = addAxis(plot2,xtitle='Frequency [GHz]',ytitle='%s [%s]'%(spec[0][det].fluxDescription,spec[0][det].fluxUnit.dialogName),\
            xrange=[430,1580],yrange=[ymin,boxMinMax[1]*0.65],width=800,height=450,\
            legend=0,legendFS=12,title='%s Observation ID %s'%(spec.meta['object'].value,obsid))
        plot2.setPlotSize(herschel.ia.gui.plot.renderer.DoubleDimension2D(6, 3))
    return plot2

def postcardPlotsAddSnr(plot2, det, cutFeatures, cutSnrs, cutPeaks, \
                        cutContinuumValues, overlap, boxMinMax, shift=0.0, colourShift=[0.0,0.0,0.0]):
    '''
    Adds the spectrum to and marks the found features > snrCut
    on the prepared postcard plot
    Input:
        - plot2: plot prepared by preparePostcardPlots
        - det: the detector to plot
        - cutFeatures: frequencies of features found > snrCut
        - cutSnrs: SNRs corresponding to cutFeatures, for scaling the feature marks
        - cutPeaks: fitted peaks corresponding to cutFeatures
        - cutContinuumValues: continuum values corresponding to cutFeatures
        - overlap: overlap region between the bands
        - boxMinMax: output from preparePostcardPlots and used to define ymin
        - shift: frequency offset applied to the tick marks
        - colourShift: RGB adjustments to distinguish repeated calls
    '''
    lineThicks = Double1d(cutFeatures.size)+0.75
    if det[:3] == 'SLW':
        linCol = Color(0.8-colourShift[0],0.0,0.0)
        specCol = darkRed
        #SLW features inside the band-overlap region get thicker tick marks
        posOverLap = cutFeatures.where(cutFeatures > overlap[0])
        lineThicks[posOverLap] = 1.5
    else:
        linCol = Color(0.0,0.5-colourShift[1],0.8-colourShift[2])
        specCol = darkBlue
    ##add the features
    for go in range(cutFeatures.size):
        feature = cutFeatures[go] + shift
        snr = cutSnrs[go]
        lineThick = lineThicks[go]
        totalPeak = cutPeaks[go] + cutContinuumValues[go]
        subPeak = cutPeaks[go]
        if snr > 0:
            #Emission feature: tick above the peak, length scaled by LOG(snr)
            plot2.addLayer(LayerXY(Double1d([feature,feature]),Double1d([totalPeak+(boxMinMax[1]-boxMinMax[0])*0.05,totalPeak+(boxMinMax[1]-boxMinMax[0])*0.05*LOG(snr)]),\
                color=linCol,stroke = lineThick))
            ymin = boxMinMax[0]
        else:
            #Absorption feature: tick below the peak
            ymin = boxMinMax[0]-(boxMinMax[1]-boxMinMax[0])*0.05
            plot2.addLayer(LayerXY(Double1d([feature,feature]),Double1d([totalPeak-(boxMinMax[1]-boxMinMax[0])*0.05*LOG(ABS(snr)),totalPeak-(boxMinMax[1]-boxMinMax[0])*0.05]),\
                color=linCol,stroke = lineThick))
    return plot2

#Other plotting functions
def addAxis(plot,xtitle='Frequency [GHz]',ytitle='Flux density [Jy]',xrange=0,\
            yrange=0,width=0,height=0,legend=0,legendFS=10,title=''):
    '''
    Handy way to add a title, axis titles, axis ranges, resize the plot
    window and handle the legend.  Passing 0 (the default) for xrange,
    yrange, width or height leaves that property untouched.
    '''
    plot.xtitle=xtitle
    plot.ytitle=ytitle
    plot.titleText=title
    plot.legend.visible=legend
    plot.legend.fontSize=legendFS
    if xrange:
        plot.xrange=xrange
    if yrange:
        plot.yrange=yrange
    if width:
        plot.width=width
    if height:
        plot.height=height
    return plot

def plotColours(colours,snrThresholds):
    '''
    Plots a colour reference for the snrThreshold colours
    Takes a list of colours and snrThresholds
    '''
    plotCol = PlotXY()
    ystr = ['']
    #One horizontal bar per threshold colour, stacked top to bottom
    for go in range(len(colours)):
        plotCol.addLayer(LayerXY(Double1d([0,1]),Double1d([len(colours)-go,len(colours)-go]),stroke=2.0,color=colours[go],name='> %s SNR'%snrThresholds[go]))
        ystr.append('%i'%(snrThresholds[len(colours)-go-1]))
    #'go' intentionally reused after the loop (last index)
    plotCol = addAxis(plotCol,xtitle='',ytitle='SNR colour',xrange=[0,1],yrange=[0,go+2],\
        width=0,height=300,legend=1,legendFS=12,title='Colour reference')
    ystr.append('')
    #Hide the x axis ticks/labels and replace the y tick labels with thresholds
    plotCol.xaxis.tick.label.visible=False
    plotCol.xaxis.getTick().setMinorNumber(0)
    plotCol.xaxis.getTick().setNumber(0)
    plotCol.xaxis.getTick().visible=False
    plotCol.yaxis.getTick().setMinorNumber(0)
    plotCol.yaxis.tick.setNumber(go+2)
    plotCol.yaxis.tick.label.setFixedStrings(ystr)
    plotCol.yaxis.tick.label.setOrientation(1)
    return plotCol

################################################################################
####                                8.
Products I/O #### ################################################################################ ###Get product from the archive def getProduct(obsid, res='HR', useHsa=True, mapping=False, cubeName='', fitsFile=None): ''' Retrives product from the HSA Input: - obsid: observation ID - res: resolution of product - useHsa: set to False if the pool is stored locally - mapping: set to True to obtain the SLW and SSW cubes, otherwise the pointsource calibrated SDS is extracted Output: - obs: the observation context - product: the extracted product - name: name used for plotting ''' product = True if useHsa: query = MetaQuery(herschel.ia.obs.ObservationContext, 'p', 'p.obsid==%s'%obsid,1) refs = hsa_storage.select(query) try: obs = hsa_storage.load(refs[-1].urn).product except: product = False name = '***Couldnt access 0x%X %i***'%(obsid,obsid) return False, product, name obs = getObservation(obsid, useHsa=True) else: if fitsFile is None: try: obs = getObservation(obsid) except: product = False name = '***Couldnt access 0x%X %i***'%(obsid,obsid) return False, product, name else: obs = fitsReader(fitsFile) name = obs.meta['object'].value product = obs.copy() if obs.meta['processResolution'].value in [res, 'H+LR']: return obs, product, name else: name = ('***Defined resolution for 0x%X %i does not match the ' 'observation.***'%(obsid,obsid)) return False, False, name if ('FAILED' in obs.quality.meta["state"].string): product = False name = "0x%X %i failed, skipping it"%(obsid, obsid) if product: try: if obs.meta['processedAs'].value != 'H+LR': if res != obs.meta['processedAs'].value: product = False name = '***Defined resolution for 0x%X %i does not match the observation.***'%(obsid,obsid) except: product = False name = '***No processedAs: failed? 
0x%X %i***'%(obsid,obsid) if product: if mapping: #Extract the cube product = [obs.refs["level2"].product.refs["%s_%s_%s"%(res,'SLW',cubeName)].product] product.append(obs.refs["level2"].product.refs["%s_%s_%s"%(res,'SSW',cubeName)].product) else: product = obs.refs['level2'].product.refs['%s_spectrum_point'%(res)].product name = '%s %s'%(obs.meta['object'].value,obsid) else: if mapping: product = ['failed'] else: product = 'failed' return obs, product, name def sortArrays(sorter, toBeSorted): ''' Sorts a list of arrays depending on sorter Input: - sorter: array to use for sorting - toBeSorted: list of arrays to sort following the order of sorter Output: - sorterSorted: reordered sorter - sortedOut: the sorted list of toBeSorted - ind: sorting indices ''' ind = SORT.BY_INDEX(sorter.copy()) sorter = sorter[Selection(ind)] sortedOut = [] for arr in toBeSorted: arr = arr[Selection(ind)] sortedOut.append(arr) return sorter,sortedOut,ind def prepareOutputTable(columns, names, tableDescription='', description=False, \ units=False, addMeta=False, metas=False, metaNames=False,\ metaType=False, metaDescription=False, metaUnit=False): ''' Prepares a TableDataset Input: - columns: columns to add to the table - names: column names - description: column description - units: column units - addMeta: set to True to add metadata using metas, metaNames and metaType - metas: list of metadata to add - metaNames: list of metadata names, corresponding to metas - metaType: list of metadata type, corresponding to metas 'S' = StringParameter, 'D' = DoubleParameter, 'L' = LongParameter - metaDescription: metas description - metaUnit: metas units Output: - outputTable: filled table ''' outputTable = TableDataset(description = tableDescription) #Add the flag definitions flagName = ['flagGood','flagGoodNoisy','flagPoor','flagPoorNoisy'] flagDes = ['Good fit in lower noise region','Good fit in noisy region','Poor fit in lower noise region','Poor fit in noisy region'] flagVal = 
['0.0','0.1','1.0','1.1'] for fName,fDes,fVal in zip(flagName,flagDes,flagVal): outputTable.meta['%s'%(fName)]=StringParameter(fVal) outputTable.meta['%s'%(fName)].description = fDes for col,nam in zip(columns,names): outputTable['%s'%nam] = Column(col) if description: for des,nam in zip(description,names): if des: outputTable['%s'%nam].description = des if units: for unit,nam in zip(units,names): if unit: outputTable['%s'%nam].unit = unit if addMeta: for met,metNam,typ in zip(metas,metaNames,metaType): if typ == 'S': outputTable.meta['%s'%metNam]=StringParameter(met) elif typ == 'D': outputTable.meta['%s'%metNam]=DoubleParameter(met) elif typ == 'L': outputTable.meta['%s'%metNam]=LongParameter(met) if metaDescription: for des,metNam in zip(metaDescription,metaNames): if des: outputTable.meta['%s'%metNam].description = des if metaUnit: for unit,metNam in zip(metaUnit,metaNames): if unit: outputTable.meta['%s'%metNam].unit = unit return outputTable def printFeaturesVerbose(cutFeatures, cutSnrs, name): ''' Prints all features found > snrCut and their finalSnr''' print 'In ' + name + ', found:' for feature, snr in zip(cutFeatures,cutSnrs): print 'feature at '+str(feature)+'GHz, with SNR '+str(snr) if len(features) == 0: print 'no features' return #for the log file def addToLog(message,fileName): file = open(fileName,'a') file.write('%s\n'%message) file.close() ################################################################################ #### 9. 
#        GoF/ToFE                                                           ####
################################################################################
###GoF
def gOfF(featureList, specIn, models, rng=3):
    """
    Calculates the goodness of fit per feature found.

    For each feature the summed model of every *other* feature is removed
    from the data, and the normalised cross-correlation between the cleaned
    data and the feature's own fitted sinc is averaged over a +/- rng GHz
    window around the feature.

    Inputs:
        - featureList: input features found
        - specIn: continuum subtracted spectrum
        - models: fitted models, one per feature
        - rng: +/- range over which the goodness of fit is calculated (GHz)
               default is 3 GHz
    Returns:
        - gFit: the goodness of fit per feature
    """
    goodness = Double1d()
    freqAxis = specIn.wave
    for idx in range(len(featureList)):
        centre = featureList[idx]
        #Evaluate the fitted sinc of the current feature on the frequency axis
        singleModel = specIn.copy()
        currentSinc = SincModel()
        currentSinc.setParameters(models[idx].getFittedParameters())
        singleModel.flux = currentSinc(freqAxis)
        #Sum the fitted sincs of all the other features
        restModel = specIn.copy()
        restModel.flux = restModel.flux - restModel.flux
        for other in range(len(models)):
            if other == idx:
                continue
            otherSinc = SincModel()
            otherSinc.setParameters(models[other].getFittedParameters())
            restModel.flux += otherSinc(freqAxis)
        #Remove every other feature from the data, leaving only this one
        cleaned = specIn.copy()
        cleaned.flux = cleaned.flux - restModel.flux
        window = (centre - rng, centre + rng)
        statsData = statistics(ds=cleaned, ranges=window)
        statsModel = statistics(ds=singleModel, ranges=window)
        #Normalised cross-correlation of data and model within the window
        crossSpec = specIn.copy()
        crossSpec.flux = ((cleaned.flux - statsData.getValueAt(0,0))\
            *(singleModel.flux - statsModel.getValueAt(0,0))/(statsData.getValueAt(0,1)*statsModel.getValueAt(0,1)))
        statsCross = statistics(ds=crossSpec, ranges=window)
        goodness.append(statsCross.getValueAt(0,0))
    return goodness
#Tested.
#Not found useful
def gOfFbadRemover(gOfFmidCut,featuresFound,featuresFoundErr,continuumPeakResult,\
                   continuumResult,peaksAll,snrIterResult,snrThresholdsResult,\
                   flagWidthsAll,sincModels):
    '''
    Removes features found that are below the goodness threshold (0.54),
    filtering all the parallel result arrays with the same selection.
    Returns the (possibly filtered) inputs in the same order.
    '''
    # NOTE(review): 'sincModels[pos]' assumes every input supports Selection
    # indexing -- TODO confirm sincModels is a HIPE array and not a plain list.
    pos = gOfFmidCut.where(gOfFmidCut > 0.54)
    if pos.toInt1d().size > 0:
        gOfFmidCut,featuresFound,featuresFoundErr,continuumPeakResult,\
        continuumResult,peaksAll,snrIterResult,snrThresholdsResult,\
        flagWidthsAll,sincModels = \
        gOfFmidCut[pos],featuresFound[pos],featuresFoundErr[pos],continuumPeakResult[pos],\
        continuumResult[pos],peaksAll[pos],snrIterResult[pos],snrThresholdsResult[pos],\
        flagWidthsAll[pos],sincModels[pos]
    return gOfFmidCut,featuresFound,featuresFoundErr,continuumPeakResult,\
           continuumResult,peaksAll,snrIterResult,snrThresholdsResult,\
           flagWidthsAll,sincModels

def getGofFflags(gOfFdets):
    ''' obtain the GoF flags: 0 for GoF >= 0.64 (good), 1 otherwise '''
    gOfFflagsOut=Int1d()
    for gg in gOfFdets:
        if gg >= 0.64:
            gOfFflagsOut.append(0)
        elif (gg < 0.64):
            gOfFflagsOut.append(1)
    return gOfFflagsOut

###Total Fit Evidence
def fitEvidence(spec,featureList,normalise=True, plotIt=True, verbose=True, useWeights=False, polyOrder=3):
    """
    INPUTS:
        - spec: a detector spectrum
        - featureList: list of features found [GHz]
        - normalise: default = True, to normalise the frequency axis to
          numbers close to 1
        - plotIt: if diagnostic plots are to be produced.
          The plot shows the data, the model and the residual.
          On the residual curve you will see:
            - filled black triangles for features with high odds of being real
            - filled red triangles are features that are less evident
            - open red triangles are dubious
        - verbose: whether to print full details to the console
        - useWeights: weight the fit by 1/error^2
        - polyOrder: order of the polynomial continuum model
    OUTPUTS:
        - diff: the difference of the evidence for a model with all features
          and a model with the feature in question removed, i.e. diff[k] is
          the difference of the evidences for the total model and the model
          with k-th feature removed.
        - oddsOut: feature odds
    """
    pi = Math.PI
    oddsOut = Double1d()
    # normalise x to be in a good interval around 1.0
    x = spec.wave
    if (normalise):
        scaleF = 1.0/MAX(x)
    else:
        scaleF = 1.0
    x = x*scaleF # rescale to be numbers close to 1
    y = spec.flux
    yerr = spec['error'].data
    if (useWeights):
        w = 1.0/yerr**2 # weights
    else:
        w = Double1d(yerr.length(),1)
    lineX = featureList.copy()*scaleF
    # Select only spectral features within the data range for this detector
    sel = lineX.where((lineX > MIN(x)) & (lineX < MAX(x)))
    lines = lineX[sel]
    nlines = lines.length()
    # Define the models to fit the data
    # First, start with a n'th order polynomial for the continuum
    m = PolynomialModel(polyOrder)
    models = m
    # Get the actual spectral resolution, will need it to fix the sinc line
    spectralResolution = spec.meta['actualResolution'].value*scaleF
    # Create a Sinc model for each spectral feature to be fitted
    for line in lines:
        # Create an initial guess for the model fit parameters
        selLine = x.where(ABS(x - line) == MIN(ABS(x - line)))
        initalAmplitude = y[selLine][0]
        # Initial Sinc parameters: [peak, feature frequency, width]
        sincParamGuess = Double1d([initalAmplitude, line, spectralResolution/pi])
        m = SincModel()
        m.setParameters(sincParamGuess)
        # Fix the Sinc width
        m.keepFixed(Int1d([2]))
        models.addModel(m)
    # Carry out the global fit
    fitter = LevenbergMarquardtFitter(x,models)
    param = fitter.fit(y,w)
    totalModel = models(x)
    residual = y - totalModel
    scale = fitter.autoScale()
    # Bayesian evidence of the full model, with a flat prior per parameter
    np = models.getNumberOfParameters()
    prior = Double1d( np+1 ) + 1000.0
    logpr = fitter.getEvidence( prior )
    if (plotIt):
        plt = PlotXY()
        l1 = LayerXY(x/scaleF,y)
        l2 = LayerXY(x/scaleF,totalModel)
        l3 = LayerXY(x/scaleF,residual)
        plt.addLayer(l1)
        plt.addLayer(l2)
        plt.addLayer(l3)
        plt.xaxis.title.text = "Frequency (GHz)"
        plt.yaxis.title.text = "Flux (%s)"%spec.getFluxUnit().toString().split('[')[0]
    ### Loop over each feature, removing it and refitting to get the evidence of this new model
    diff = Double1d(nlines)
    if verbose:
        print "Input feature, total model evidence, evidence without the feature, difference, odds"
    for k in range(nlines):
        # remove the k-th feature from the list
        mask = Bool1d(nlines,1)
        mask.set(k,0)
        lineOne = lines.get(mask)
        # add the models (continuum + all sincs except the k-th)
        m = PolynomialModel(polyOrder)
        models = m
        for line in lineOne:
            # Create an initial guess for the model fit parameters
            selLine = x.where(ABS(x - line) == MIN(ABS(x - line)))
            initalAmplitude = y[selLine][0]
            # Initial Sinc parameters: [peak, feature frequency, width]
            sincParamGuess = Double1d([initalAmplitude, line, spectralResolution/pi])
            m = SincModel()
            m.setParameters(sincParamGuess)
            # Fix the Sinc width
            m.keepFixed(Int1d([2]))
            models.addModel(m)
        # Carry out the global fit
        fitter = LevenbergMarquardtFitter(x,models)
        param = fitter.fit(y,w)
        totalModel = models(x)
        residual = y - totalModel
        scale = fitter.autoScale()
        np0 = models.getNumberOfParameters()
        prior0 = Double1d( np0+1 ) + 1000.0
        logpr0 = fitter.getEvidence( prior0 )
        # Evidence difference and odds ratio for keeping the k-th feature
        diff[k] = (logpr - logpr0)
        odds = 10**(logpr - logpr0)
        if (verbose):
            print lines[k]/scaleF, logpr, logpr0, diff[k], odds
        oddsOut.append(odds)
    if (plotIt):
        # Mark each feature on the plot according to its evidence difference
        sel = diff.where(diff >= 1)
        nsel = sel.length()
        if (nsel > 0):
            lix = lines[sel]/scaleF
            l4 = LayerXY(lix,Float1d(nsel))
            l4.setLine(Style.NONE)
            l4.setSymbol(Style.FTRIANGLE)
            l4.setColor(java.awt.Color.BLACK)
            plt.addLayer(l4)
        sel = diff.where((diff < 1).and(diff >= -5))
        nsel = sel.length()
        if (nsel > 0):
            lix = lines[sel]/scaleF
            l5 = LayerXY(lix,Float1d(nsel))
            l5.setLine(Style.NONE)
            l5.setSymbol(Style.FTRIANGLE)
            l5.setColor(java.awt.Color.RED)
            plt.addLayer(l5)
        sel = diff.where((diff < -5))
        nsel = sel.length()
        if (nsel > 0):
            lix = lines[sel]/scaleF
            l6 = LayerXY(lix,Float1d(nsel))
            l6.setLine(Style.NONE)
            l6.setSymbol(Style.TRIANGLE)
            l6.setColor(java.awt.Color.RED)
            plt.addLayer(l6)
    return diff, oddsOut

def getFitEvidenceFlags(result):
    ''' obtain the total fit evidence flags for diff: 0 if diff >= -6, else 1 '''
    flagsOut = Int1d()
    for diff in result:
        if diff >=-6:
            flagsOut.append(0)
        elif (diff < -6):
            flagsOut.append(1)
    return flagsOut

def getCombinedFlags(gOfFdets, results):
    ''' Assign a combined GofF and ToFE flag (0 = good, 1 = poor) '''
    if results == None:
        results = Double1d(len(gOfFdets))
    if len(gOfFdets)!=len(results):
        print 'Length of GofF and ToFE results must be equal'
        # NOTE(review): 'stop' is an undefined name; it halts the HIPE script
        # by raising a NameError (common HIPE idiom)
        stop
    flagsOut = Double1d()
    for go in range(len(gOfFdets)):
        gg = gOfFdets[go]
        diff = results[go]
        if (gg > 0.65) and (diff >= -6):
            flagsOut.append(0)
        else:
            flagsOut.append(1)
    return flagsOut

def getCombinedFlags2(combinedFlags,featuresFound,overlap,noisyRegions):
    '''
    Add to the combined flag depending on the frequency of the feature found
        0.0: good fit in lower noise region
        0.1: good fit in noisy region
        1.0: poor fit in lower noise region
        1.1: poor fit in noisy region
    '''
    #Find positions for noisy regions and add 0.1 to the flags
    addToFlag = Double1d(combinedFlags.size)+0.1
    featuresFound = Double1d(featuresFound)
    #Features inside the lower-noise part of either band get 0.0 instead
    pos = featuresFound.where((featuresFound > noisyRegions[0]) & (featuresFound < overlap[0]))
    if pos.toInt1d().size > 0:
        addToFlag[pos]=0.0
    pos = featuresFound.where((featuresFound < noisyRegions[1]) & (featuresFound > overlap[1]))
    if pos.toInt1d().size > 0:
        addToFlag[pos]=0.0
    return combinedFlags+addToFlag

################################################################################
####                           10. Redshift                                 ####
################################################################################
def estimateFinalRedshift1(freqPeak,freqSnr,freqErr,maxVelocity):
    """
    Matches up features found to 12CO lines.
    Procedure is based on comparing the differences between features found
    and the differences between 12CO rest frequencies.
    If the result is not satisfactory, estimateFinalRedshift2 is used
    Inputs:
        - freqPeak: found features > SNR cut
        - freqSnr: SNR of found features
        - freqErr: feature position uncertainties
        - maxVelocity: maximum assumed radial velocity of source
    Returns:
        - coLines: features matched to 12CO
        - coErr: fitted frequency errors associated to coLines
        - coRest: 12CO rest frequencies corresponding to coLines lines
        - zFlag: maximum number of matches per coLines
        - N: number of candidate 12CO features
    """
    coRest, matchSum = Double1d([]), Int1d([])
    #Per-spacing matching tolerance, tightened for larger assumed velocity
    Tolerance = CDA/(1+(c/maxVelocity))
    Tolinary = {CDA[i]:Tolerance[i] for i in range(len(CDA))}
    i = -1
    for freq in freqPeak:
        i += 1
        matchSum.append(0)
        for sign in [1.0,-1.0]:
            #generate the difference array for a given feature and all the other features
            inspectLine = (freqPeak-freq)*sign
            inspectLine = inspectLine[inspectLine.where(inspectLine >= 0.0)]
            #inspect each difference array to see how well it matches with the reference array
            for ref in CDA:
                if len(inspectLine.where(ABS(inspectLine - ref) <= Tolinary[ref]).toInt1d()) > 0:
                    matchSum[i]+=1
    # Subtract 1 due to double counting of 0.0 diff entry
    matchSum -= 1
    #Keep the features with the maximum number of CO-ladder matches
    maxMatch = MAX(matchSum)
    maxMatchIndex = matchSum.where(matchSum == maxMatch)
    coLines = freqPeak[maxMatchIndex]
    coSnrs = freqSnr[maxMatchIndex]
    coErr = freqErr[maxMatchIndex]
    #Assign the nearest 12CO rest frequency to each candidate line
    for freq in coLines:
        restIndex = COLadder.where(ABS(COLadder - freq) == MIN(ABS(COLadder - freq)))
        coRest.append(COLadder[restIndex][0])
    if len(coLines) < 1:
        raise ValueError
    zFlag='M%i'%maxMatch
    N = len(coLines)
    #Determine if there are multiple lines attributed to the same rest frequency.
    #If so, propagate lines with highest snr
    if N > 1:
        kCoLines,kCoSnrs,kCoErr,kCoRest = Double1d([]),Double1d([]),Double1d([]),Double1d([])
        for dub in COLadder:
            match = coRest.where(coRest == dub)
            if len(match.toInt1d()) > 0:
                keepIndex = coSnrs.where(coSnrs == MAX(coSnrs[match]))
                kCoLines.append(coLines[keepIndex])
                kCoSnrs.append(coSnrs[keepIndex])
                kCoErr.append(coErr[keepIndex])
                kCoRest.append(coRest[keepIndex])
        coLines,coSnrs,coErr,coRest = kCoLines,kCoSnrs,kCoErr,kCoRest
        N = len(coLines)
        #Iteratively reject velocity outliers until the scatter is < 100 km/s
        velocities = ((1-(coLines/coRest))*c)
        while STDDEV(velocities) > 100.0:
            ind = velocities.where(MAX(ABS(velocities-MEDIAN(velocities))) == ABS(velocities-MEDIAN(velocities))).toInt1d()[0]
            coLines.delete(ind,1)
            coErr.delete(ind,1)
            coSnrs.delete(ind,1)
            coRest.delete(ind,1)
            velocities = (((coRest/coLines)-1)*c)
            N = len(coLines)
            zFlag = 'M%i'%N
    return coLines,coErr,coRest,zFlag,N

def estimateFinalRedshift2(freqPeak,freqSnr,freqErr,window=50.0):
    """
    Matches up features found to NII.
    *Used when estimateFinalRedshift1 gives poor results*
    Inputs:
        - freqPeak: found features > SNR cut
        - freqSnr: SNR of found features
        - freqErr: feature position uncertainties
        - window: search window
    Returns:
        - maxLines: lines found in the given windows with the highest snr
        - maxSnrs: SNR corresponding to maxLines
        - maxErr: error corresponding to maxLines
        - freqRest: rest frequencies corresponding to maxLines
    """
    # NOTE(review): the 'window' parameter is unused -- the search half-width
    # is hard-coded to 60 GHz below.  1461.13141 GHz is the [NII] 205um line.
    NIIind = freqPeak.where(ABS(freqPeak-1461.13141) <= 60.0).toInt1d()
    NIIn = len(NIIind)
    NIIind = Selection(NIIind)
    #Only trust the NII identification in sparse spectra with a strong line
    if (NIIn > 0) and (len(freqPeak) < 11) and (max(freqSnr[NIIind]) >= 10.0):
        NIILines = freqPeak[NIIind]
        NIISnr = freqSnr[NIIind]
        NIIErr = freqErr[NIIind]
        indMax = NIISnr.where(NIISnr == max(NIISnr)).toInt1d()[0]
        maxLines = Double1d([NIILines[indMax]])
        maxSnrs = Double1d([NIISnr[indMax]])
        maxErr = Double1d([NIIErr[indMax]])
        freqRest = Double1d([1461.13141])
        return maxLines,maxSnrs,maxErr,freqRest
    else:
        return None,None,None,None

#Main redshift implementation function
def redshiftFF(frequency,freqErr,freqSnr,maxVelocity=6000.0,velocityDelta=2000.0,N_min=7):
    """
    Estimates redshift based on final features found with positive snr > snrCut
    estimateFinalRedshift1 is applied first and if the result is not
    satifactory estimateFinalRedshift2 is applied.
    Inputs:
        - frequency: found features > SNR cut
        - freqErr: feature position uncertainties
        - freqSnr: SNR of found features
        - maxVelocity: maximum assumed absolute value of radial velocity of source
        - velocityDelta: increments maxVelocity
        - N_min: minimum number of candidate 12CO lines for good estimate
    Returns:
        - estimated redshift
        - error on estimated redshift
        - zFlag: flag indicating the maximum number of matches between the CDA
          and the difference array generated for each line.
          "M1" is bad "M10" is best. "NII" if nitrogen line is used.
        - N: total number of lines with the maximum number of matches.
          "1" is bad "10" is best
    """
    frequency,freqErr,freqSnr = Double1d(frequency),Double1d(freqErr),Double1d(freqSnr)
    #Remove negative
    posSnrIndex = freqSnr.where(freqSnr > 0.0)
    frequency = frequency[posSnrIndex]
    freqErr = freqErr[posSnrIndex]
    freqSnr = freqSnr[posSnrIndex]
    if len(frequency) < 1:
        return Double.NaN, Double.NaN, 'nan', 0, 'nan'
    zFlag,N = 'nan', 0
    #Widen the assumed velocity range until enough CO matches are found
    while (N < N_min or (zFlag in ['M1','M2','nan'])) and (maxVelocity <= 12000):
        #Try first redshift estimate (based on CDA)
        coLines,coErr,coRest,zFlag,N = estimateFinalRedshift1(frequency,freqSnr,freqErr,maxVelocity)
        maxVelocity += velocityDelta
    if N < N_min:
        #Not enough CO lines to be reliable.
Check for NII try: coLines,coSnrs,coErr,coRest = estimateFinalRedshift2(frequency,freqSnr,freqErr,50.0) if coRest[0] == 1461.13141: #If NII line is found zFlag, N = 'NII', 1 except: return Double.NaN, Double.NaN, 'nan', 0, 'nan' if len(coLines) > 1: return MEDIAN(((coRest/coLines)-1)*c), STDDEV(((coRest/coLines)-1)*c),zFlag,N,Double1d(coLines) else: return MEDIAN(((coRest/coLines)-1)*c), MEDIAN((c*coRest*coErr)/coLines**2),zFlag,N,Double1d(coLines) ################################################################################ #### 10. Neutral Carbon Check #### ################################################################################ def makeFitter(specIn, testLines, extraLines, extraSnr, contSubSpec, polyPar, search): engine, fixed = 1, [2] deltaSigma = 1.18448225 p2 = deltaSigma / PI if search: #Initial search parameters if specIn['flux'].unit.name == "Jy": tolerance = 1.0E20 else: tolerance = 1.0E-10 else: #Final optimizing parameters if specIn['flux'].unit.name == "Jy": tolerance = 1.0E-10 else: tolerance = 1.0E-40 fitter = SpectrumFitter(specIn, False) fitter.useFitter(engine) fitter.setTolerance(tolerance) fitter.addModel('Polynomial',[3],list(polyPar)) modNew = [None]*(len(extraLines)+len(testLines)) #add test lines for i in range(len(testLines)): modNew[i] = fitter.addModel('sinc', testLines[i]) modNew[i].setFixed(fixed) #add extral lines for l in range(len(extraLines)): cent = extraLines[l] snr_ = extraSnr[l] pos = specIn.wave.where(MIN(ABS(specIn.wave - cent)) == ABS(specIn.wave - cent)) amp = contSubSpec[pos]*snr_ modNew[l+len(testLines)] = fitter.addModel('sinc', [amp[0],cent,p2]) modNew[l+len(testLines)].setFixed([1,2]) fitter.doGlobalFit() fitter.residual() return fitter, modNew def checkCI10(specIn, CIparams, polyPar, velocity, window=20.0): # Look for CI 1-0 CI10, CI21 = 492.160651, 809.34197 velocity = ((CI21/CIparams[1])-1.0)*c lineM, maxAmp = CI10/((velocity/c)+1.0), 0.33*CIparams[0] p2 = CIparams[2] fixed = [2] minIndex = 
specIn.wave.where(MIN(ABS((lineM-window) - specIn.wave)) == ABS((lineM-window) - specIn.wave)).toInt1d()
    maxIndex = specIn.wave.where(MIN(ABS((lineM+window) - specIn.wave)) == ABS((lineM+window) - specIn.wave)).toInt1d()
    #Extract a +/-'window' GHz sample around the expected CI(1-0) position
    specT = specIn.copy()
    specT = extract(selection=[0], ds=specT, ranges=(specIn.wave[minIndex],specIn.wave[maxIndex-1]))
    #Fit a single sinc at the expected position (final-optimisation tolerance)
    fitterMax, modNew = makeFitter(specIn=specT, testLines=[[maxAmp,lineM,p2]], \
                                   extraLines=[], extraSnr=[], contSubSpec=[], \
                                   polyPar=polyPar, search=False)
    CI10Res = SUM(ABS(fitterMax.getResidual()))
    CI10param = modNew[0].fittedParameters
    CI10Err = modNew[0].fittedStdDev
    #Reject if the fitted centre wandered >0.8 GHz from the expected position
    if (ABS(CI10param[1] - lineM) > 0.8):
        return [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], 0.0
    #Measure the SNR of the fitted peak against the local fit residual
    resSpec = specT.copy()
    resSpec.flux = fitterMax.getResidual()
    CI10snr = findResidualSnr(resSpec, CI10param[1], CI10param[0], snrRange=[5.0,20.0], \
                              subtractBaseline=True, baselineOrder=3, ignoreFlag=None, \
                              avoid=0.0)
    return CI10param, CI10Err, CI10snr

def checkNeutralC(specIn, feature, snr, polyPar, velocity, window=25.0, apod=False, fwhm=None):
    """
    Performs a check for the neutral carbon line adjacent the 12CO(7-6)
    transition. Returns False if no change.

    Inputs:
    -specIn: SLW spectrum container
    -feature: array of input features found in the SLW spectrum
    -snr: array of signal to noise corresponding to features
    -polyPar: array of SLW continuum parameters
    -velocity: source velocity [km/s]
    -window: defines the radius of the spectral sample
    -apod: if using apodized data
    -fwhm: if there is a predefined fwhm

    Returns:
    -removeFreq: Original feature to be removed (ie, CO 7-6 / CI 2-1) if a new line is found
    -outFreq: list of new features with order [12CO(7-6), CI(2-1), CI(1-0)]
    -outErr: list of new frequency errors with same order
    -outSnr: list of new SNR with same order
    """
    feature, snr = Double1d(feature), Double1d(snr)
    CI10found = False
    outFreq, outErr, outSnr = [], [], []
    removeFreq = None
    # remove negative snr feature
    posSnr = snr.where(snr > 0.0)
    snr, feature = snr[posSnr], feature[posSnr]
    #NOTE(review): 'velocity is Double.NaN' is an identity test; confirm it
    #behaves as a NaN check for the velocities actually passed in
    if (len(feature) == 0) or (velocity is Double.NaN):
        return removeFreq, outFreq, outErr, outSnr
    # expected frequency position of CO(7-6), CI, and their average
    fact = 1.0/((velocity/c) + 1)
    CO, CI, CI10 = 806.65181*fact, 809.34197*fact, 492.160651*fact
    deltaF = fact*2.69
    deltaSigma = 1.18448225
    p2 = deltaSigma / PI
    # check if there is a feature within 0.8 GHz of expected freq
    isCO, isCI, isCI10 = MIN(ABS(feature-CO)) <= 0.8, MIN(ABS(feature-CI)) <= 0.8, MIN(ABS(feature-CI10)) <= 0.8
    #calculate continuum subtracted spectrum
    contSubSpec = specIn.flux - (polyPar[0] + polyPar[1]*specIn.wave + polyPar[2]*specIn.wave**2 + polyPar[3]*specIn.wave**3)
    #CI(2-1) found but CI(1-0) not: go hunting for CI(1-0) directly
    if isCI and (not isCI10):
        CIline = feature[feature.where(MIN(ABS(feature - CI)) == ABS(feature - CI))][0]
        CI21amp_i = contSubSpec[contSubSpec.where(MIN(ABS(specIn.wave-CIline)) == ABS(specIn.wave-CIline))]
        CI10params, CI10Err, CI10snr = checkCI10(specIn, [CI21amp_i[0], CIline, p2], polyPar, velocity, window)
        if CI10snr >= 5.0:
            isCI10 = True
            outFreq.append(CI10params[1])
            outErr.append(CI10Err[1])
            outSnr.append(CI10snr)
    #Only one of CO(7-6)/CI(2-1) was found: the found feature may be a blend,
    #so set up a two-component re-fit around it
    if isCO and (not isCI):
        ind = feature.where(MIN(ABS(feature-CO)) == ABS(feature-CO)).toInt1d()[0]
        freqOrig = feature[ind]
        COline = feature[feature.where(MIN(ABS(feature - CO)) == ABS(feature - CO))][0]
        ind = specIn.wave.where(MIN(ABS(specIn.wave - COline)) == ABS(specIn.wave - COline)).toInt1d()[0]
        xl = feature.where((feature >= freqOrig-window).and(feature <= freqOrig+window).and(feature != COline))
    elif isCI and (not isCO):
        ind = feature.where(MIN(ABS(feature-CI)) == ABS(feature-CI)).toInt1d()[0]
        freqOrig = feature[ind]
        CIline = feature[feature.where(MIN(ABS(feature - CI)) == ABS(feature - CI))][0]
        ind = specIn.wave.where(MIN(ABS(specIn.wave - CIline)) == ABS(specIn.wave - CIline)).toInt1d()[0]
        xl = feature.where((feature >= freqOrig-window).and(feature <= freqOrig+window).and(feature != CIline))
    else:
        #Both or neither found: nothing to disentangle
        return removeFreq, outFreq, outErr, outSnr
    #define sample spectrum index limits
    minIndex = specIn.wave.where(MIN(ABS((freqOrig-window) - specIn.wave)) == ABS((freqOrig-window) - specIn.wave)).toInt1d()
    maxIndex = specIn.wave.where(MIN(ABS((freqOrig+window) - specIn.wave)) == ABS((freqOrig+window) - specIn.wave)).toInt1d()
    #check for additional feature in this range
    extraLines = feature[xl]
    extraSnr = snr[xl]
    # set up the spectral sample to be fit
    specT = specIn.copy()
    specT = extract(selection=[0], ds=specT, ranges=(specIn.wave[minIndex],specIn.wave[maxIndex]))
    contSubSpec = contSubSpec[minIndex[0]:maxIndex[0]+1]
    maxAmp = MAX(contSubSpec)
    caseModels, caseResiduals = [], Double1d()
    # case 1 (primary feature only)
    lineM = specT.wave[specT.flux.where(MAX(specT.flux) == specT.flux)][0]
    testLines = [[maxAmp,lineM,p2]]
    fitterMax, modNew = makeFitter(specT, testLines, extraLines, extraSnr, contSubSpec, polyPar, search=True)
    residualMaxFit = SUM(ABS(fitterMax.getResidual()))
    caseResiduals.append(residualMaxFit)
    caseModels.append(modNew)
    res_i = residualMaxFit
    fittedPar = modNew[0].fittedParameters
    AMP, CENT = fittedPar[:2]
    resSpec = specT.copy()
    resSpec.flux = fitterMax.getResidual()
    # case 2 (secondary to the left)
    testLines = [[AMP*0.3,CENT-(2.69*fact),p2], [AMP,CENT,p2]]
    fitterMax, modNew = makeFitter(specT, testLines, extraLines, extraSnr, contSubSpec, polyPar, search=True)
    residualMaxFit = SUM(ABS(fitterMax.getResidual()))
    caseResiduals.append(residualMaxFit)
    caseModels.append(modNew)
    # case 3 (secondary to the right)
    testLines = [[AMP,CENT,p2], [AMP*0.3,CENT+(2.69*fact),p2]]
    fitterMax, modNew = makeFitter(specT, testLines, extraLines, extraSnr, contSubSpec, polyPar, search=True)
    residualMaxFit = SUM(ABS(fitterMax.getResidual()))
    caseResiduals.append(residualMaxFit)
    caseModels.append(modNew)
    #find residual minimizing model
    outInd = caseResiduals.where(MIN(caseResiduals) == caseResiduals).toInt1d()
    if outInd[0] == 0:
        #Single-line model already fits best: nothing new found
        return removeFreq, outFreq, outErr, outSnr
    bestModels = caseModels[outInd[0]]
    #Collect the fitted centres of the test lines (skip the known extras)
    lineListOut = Double1d()
    m = -1
    for mod in bestModels:
        m += 1
        pars = mod.fittedParameters
        if pars[1] in extraLines:
            continue
        else:
            lineListOut.append(pars[1])
    #Require one line near CO(7-6) AND one near CI(2-1)
    if (MIN(ABS(lineListOut - CO)) <= 0.8) and (MIN(ABS(lineListOut - CI)) <= 0.8):
        COind = lineListOut.where(ABS(lineListOut - CO) == MIN(ABS(lineListOut - CO))).toInt1d()[0]
        CIind = lineListOut.where(ABS(lineListOut - CI) == MIN(ABS(lineListOut - CI))).toInt1d()[0]
    else:
        return removeFreq, outFreq, outErr, outSnr
    COparams = bestModels[COind].fittedParameters
    CIparams = bestModels[CIind].fittedParameters
    if COparams[1] == CIparams[1]:
        #Both components converged to the same centre: reject
        return removeFreq, outFreq, outErr, outSnr
    #Final optimizing fit
    testLines = [[COparams[0],COparams[1],p2], [CIparams[0],CIparams[1],p2]]
    fitterMax, modNew = makeFitter(specT, testLines, extraLines, extraSnr, contSubSpec, polyPar, search=False)
    lineListOut = Double1d()
    m = -1
    for mod in modNew:
        m += 1
        pars = mod.fittedParameters
        if pars[1] in extraLines:
            continue
        else:
            lineListOut.append(pars[1])
    if (MIN(ABS(lineListOut - CO)) <= 0.8) and (MIN(ABS(lineListOut - CI)) <= 0.8):
        COind = lineListOut.where(ABS(lineListOut - CO) == MIN(ABS(lineListOut - CO))).toInt1d()[0]
        CIind = lineListOut.where(ABS(lineListOut - CI) == MIN(ABS(lineListOut - CI))).toInt1d()[0]
    else:
        return removeFreq, outFreq, outErr, outSnr
    #NOTE(review): parameters are read from 'bestModels' (the search fit),
    #not 'modNew' (the final optimizing fit just performed) - confirm intended
    COparams = bestModels[COind].fittedParameters
    COErr = bestModels[COind].fittedStdDev
    CIparams = bestModels[CIind].fittedParameters
    CIErr = bestModels[CIind].fittedStdDev
    finalResidual = fitterMax.getResidual()
    res_f = SUM(ABS(finalResidual))
    resSpec = specT.copy()
    resSpec.flux = fitterMax.getResidual()
    COsnr = findResidualSnr(resSpec, COparams[1], COparams[0], snrRange=[5.0,20.0], \
                            subtractBaseline=True, baselineOrder=3, ignoreFlag=None, \
                            avoid=0.0)
    CIsnr = findResidualSnr(resSpec, CIparams[1], CIparams[0], snrRange=[5.0,20.0], \
                            subtractBaseline=True, baselineOrder=3, ignoreFlag=None, \
                            avoid=0.0)
    #Both components must be significant
    if MIN([COsnr, CIsnr]) < 5.0:
        return removeFreq, outFreq, outErr, outSnr
    #add 12CO line
    outFreq.append(COparams[1])
    outErr.append(COErr[1])
    outSnr.append(COsnr)
    #add CI line
    outFreq.append(CIparams[1])
    outErr.append(CIErr[1])
    outSnr.append(CIsnr)
    #The original blended feature is superseded by the two new components
    removeFreq = freqOrig
    if not isCI10:
        #Having confirmed CI(2-1), also look for CI(1-0)
        CI10params, CI10Err, CI10snr = checkCI10(specIn, CIparams, polyPar, velocity, window)
        if CI10snr >= 5.0:
            outFreq.append(CI10params[1])
            outErr.append(CI10Err[1])
            outSnr.append(CI10snr)
    return removeFreq, outFreq, outErr, outSnr

###########
def getLrSensitivityCurve(specIn,det,nRep):
    """
    Low-resolution sensitivity curve interpolated onto specIn.wave.
    'det' starting with 'SLW' selects the SLW table, anything else the SSW
    table; the curve is scaled by the repetition count nRep.
    """
    if det[:3] == 'SLW':
        wave = Double1d([474.688687,524.688687,574.6886871,624.688687,674.688687,724.688687,\
            774.688687,824.688687,874.688687,924.688687,974.688687,1024.688687])
        noise = Double1d([0.005589289548116073,0.007050785203565035,0.00538479092743205,\
            0.0027678183497640467,0.0024452789314772737,0.0021398242148737206,\
            0.0018364384522615057,0.002720349889066923,0.003758280646095778,\
            0.004029375703674937,0.0064850201607990175,3.764427510732575E-14])
    else:
        wave = Double1d([969.3462427,1019.3462427,1069.3462427,1119.3462427,1169.3462427,\
            1219.3462427,1269.3462427,1319.3462427,1369.3462427,1419.3462427,1469.3462427,\
            1519.3462427,1569.3462427])
        noise = 
Double1d([0.004307472270066939,0.0040769967997953994,0.004416264419098215,\
            0.0031318220560894597,0.004492314740320095,0.004032574376229342,0.0033361176771723538,\
            0.0038103695254187208,0.0035936107295122337,0.0032458640783488463,0.0042881934331586375,\
            0.004473192049105586,0.001780052852451424])
    #Scale for integration time: 6.4 s per scan, 2 scan directions, nRep repeats
    scaleFactor = SQRT(6.4*2*nRep/3600.)
    interp = LinearInterpolator(wave,noise,1)
    noiseOut = interp(specIn.wave)
    return noiseOut/scaleFactor

################################################################################
####                         12. Feature finding                           ####
################################################################################
###Used to update the frequencies of already found features after doGlobalFit()
def updateFeatureFrequencies(sincModels):
    """
    Inputs
        - sincModels: list of sinc models fitted to the already found features
    Outputs
        - newFeatureFrequencies: updated frequencies for the already found features
          (fitted parameter 1, the line centre, of each model)
    """
    newFeatureFrequencies = []
    for m in sincModels:
        newFeatureFrequencies.append(m.getFittedParameters()[1])
    return newFeatureFrequencies

###Used to update or recreate feature flagging
def flagSpec1d(specIn, features, flags, flagWidth, recreate = False):
    """
    Inputs
        - specIn: input spectrum1d
        - features: input features as a list
        - flags: existing flags Python dictionary, keyed by detector name,
          holding 'p1' (feature positions), 'fL'/'fR' (left/right edges of
          the flagged region around each feature)
        - flagWidth: the width of flagging required
        - recreate: set to flag the associated detector from scratch
    Outputs
        - flags: updated feature flagging pyDictionary
    """
    if recreate:
        #Replace the detector's flagging wholesale
        flags[specIn.meta['channelName'].value]['p1']=Double1d(features)
        flags[specIn.meta['channelName'].value]['fL']=Double1d(features)-flagWidth
        flags[specIn.meta['channelName'].value]['fR']=Double1d(features)+flagWidth
    else:
        #Append the new features to the existing flagging
        flags[specIn.meta['channelName'].value]['p1'].append(Double1d(features))
        flags[specIn.meta['channelName'].value]['fL'].append(Double1d(features)-flagWidth)
        flags[specIn.meta['channelName'].value]['fR'].append(Double1d(features)+flagWidth)
    return flags

###Used for the preliminary fit to the continuum###
def findJumps(specIn, jumpThreshold, resampleResolution=None, weight=True,\
              flag=True, equalWeight=False):
    """
    Find strong features in the spectrum by looking for rapid changes (jumps)

    Inputs:
    - specIn: input spectrum
    - jumpThreshold: size of jump required (in standard deviations)
    - resampleResolution: resolution used for resampling (GHz)
    - weight: set weight column = 0 for strong features if True
    - flag: set flag column = 1 for strong features if True
    - equalWeight: set initial weight to a constant (1.0)
                   set equalWeight=True for apodized spectra

    Returns:
    - resampSpec: resampled spectrum (None if no resampleResolution given)
    - specOut: original spectrum that has been flagged and weighted
    """
    if equalWeight:
        size = len(specIn.wave)
        specIn.setError(Double1d([1.0] * size))
    #Pick a method for resampling: 'gaussian', 'trapezoidal', or 'euler'
    resampScheme = 'trapezoidal'
    #Resample the spectrum and find the resampled difference spectrum
    if resampleResolution != None:
        freqBin = resampleResolution / 4.0 # half distance between resampled points
        resampSpec = resample(ds=specIn, density=True, resolution=resampleResolution,\
                              scheme=resampScheme, unit='GHz')
        diff = resampSpec.copy()
        #First difference of the flux: large values mark sharp jumps
        diff.setFlux(ABS(resampSpec.flux - SHIFT(resampSpec.flux, 1)))
    else:
        resampSpec = None
        freqBin = (specIn.wave[1] - specIn.wave[0])/2.0
        diff = specIn.copy()
        diff.setFlux(ABS(specIn.flux - SHIFT(specIn.flux, 1)))
    #Find the rms for the difference spectrum
    #NOTE(review): statistics are taken BEFORE the NaNs are zeroed below -
    #confirm this ordering is intended
    stats = statistics(ds=diff)
    pos = diff.flux.where(IS_NAN)
    diff.flux[pos] = 0
    rms1 = stats['rms_1'].getData()[0]
    #Channels whose jump exceeds the threshold (in units of the rms)
    featFreq = diff.wave[diff.flux.where(ABS(diff.flux) > jumpThreshold*rms1)]
    specOut = specIn.copy()
    weightOut = specIn.weight
    flagOut = specIn.flag
    #Loop over jumps and prepare flags and weights
    #(asymmetric window: 3 bins below the jump, 1 bin above)
    for i in xrange(featFreq.length()):
        near = specOut.wave.where((specOut.wave > featFreq[i] - 3*freqBin) & \
                                  (specOut.wave < featFreq[i] + freqBin))
        if weight:
            weightOut[near] = 0
        if flag:
            flagOut[near] = 1
    #set the weights and flags in the input spectrum
    specOut.setAutoConversion(False)
    specOut.setFlag(flagOut)
    specOut.setWeight(weightOut)
    return resampSpec, specOut

###Used for the preliminary subtraction of to the continuum###
def removeContinuum(specIn, polyOrder=3):
    """
    Makes a preliminary fit to the continuum prior to feature finding
    and returns this model and the continuum subtracted spectrum

    Inputs:
    - specIn: input spectrum
      -> This should be the weighted and flagged output from findJumps
         But not the resampled output
    - polyOrder: degree of the polynomial to be fitted

    Returns:
    - specOut: residual flux stored in "flux" column
    - fitter: the fitter
    - continuumModel: the fitted polynomial model
    - totalModel: the fitted polynomial as a Double1d()
    """
    #Pick the fitting engine: : 1 = L-M, 2 = Amoeba, 3 = Linear
    #                            4 = MP, 5 = Conjugated-Grad
    engine = 1
    #Use the SpectrumFitter
    fitter = SpectrumFitter(specIn, False)
    fitter.useFitter(engine)
    #Add a polynomial model (all coefficients initialised to zero)
    continuumModel = fitter.addModel('Polynomial', [polyOrder], Double1d(polyOrder+1).toArray())
    #Set weights to avoid strong features (input should be output from findJumps)
    fitter.setWeight(specIn.weight)
    fitter.doGlobalFit()
    fitter.residual()
    totalModel = fitter.getTotalModel()
    continuumSubtracted = fitter.getResidual()
    #Create the continuum subtracted output
    specOut = specIn.copy()
    specOut.setFlux(continuumSubtracted)
    specOut.setDescription('Continuum subtracted spectrum')
    return specOut, fitter, continuumModel, totalModel

#Used to take the final SNR
def findResidualSnr(residualSpec, featureFq, peak, snrRange=[5.0,25.0], \
                    subtractBaseline=True, baselineOrder=3, ignoreFlag=None, \
                    avoid=10.0):
    """
    Calculates SNR using the fitted feature peak and the RMS of the full residual.
Inputs: - residualSpec: residual spectrum - featureFq: frequency of the fitted feature - peak: fitted peak - snrRange: range either side of feature for noise estimate [GHz] - subtractBaseline: extra BLS as default with poly order 3 - baselineOrder: order of the BLS polynominal - ignoreFlag: regions to ignore Returns: - snr: SNR of (fitted peak/residual) """ residual = residualSpec.flux wave = residualSpec.wave rms = Double1d(len(wave)) snr = Double1d(len(wave)) snrSpec = residualSpec.copy() #Find the region for BLS and SNR estimate for the inputted feature pos = wave.where((ABS(wave-featureFq) < snrRange[1]).and(ABS(wave-featureFq) \ > snrRange[0]).and(\ residualSpec.wave < (MAX(residualSpec.wave)-avoid)).and(\ residualSpec.wave > (MIN(residualSpec.wave)+avoid)).and(\ residualSpec.flag != ignoreFlag)) if subtractBaseline: try: poly = PolynomialModel(baselineOrder) poly.setParameters(Double1d(baselineOrder+1)) fluxFitter = Fitter(wave[pos], poly) baseLine = fluxFitter.fit(residual[pos]) fluxBaseline = poly(wave[pos]) rms = QRMS((residual[pos]-fluxBaseLine)-MEAN(residual[pos]-fluxBaseline)) except: rms = QRMS(residual[pos]-MEAN(residual[pos])) else: rms = QRMS(residual[pos]-MEAN(residual[pos])) if rms != 0: snr = peak / rms else: snr = 0 return snr ###Main finding functions### def listFeatures(specSNR, snrThreshold, flags, mergeRadius=10.0, avoidFlag=1, sign='pos'): """ Compiles a list of potential features found in the SNR spectrum. 
Inputs: - specSNR: input SNR spectrum (flux/error column) - snrThreshold: SNR threshold for potential feature - mergeRadius: width within which features are merged [GHz] - avoidFlag: value of flag to avoid - sign: indicates +ve or -ve features to find (use 'pos' or 'neg') Returns: - featureFreq: list of potential new feature frequencies - featureSNR: list of potential new feature SNRs """ #Use the SNR spectrum to find potential peaks above the inputted snrThreshold if sign == 'pos': peaksAll = specSNR.flux.where((specSNR.flux > snrThreshold) & (specSNR.flag != avoidFlag)) elif sign == 'neg': peaksAll = specSNR.flux.where((specSNR.flux < (-1.0*snrThreshold)) & (specSNR.flag != avoidFlag)) else: raise ValueError("listFeatures needs 'sign' setting to 'pos' or 'neg'.") #peaks = specSNR.flux.where((ABS(specSNR.flux) > snrThreshold) & (specSNR.flag != avoidFlag)) #peaksAll = specSNR.flux.where((ABS(specSNR.flux) > snrThreshold) & \ # (specSNR.flag != avoidFlag)).toInt1d() #Do this properly #peaksAll = specSNR.flux.where((ABS(specSNR.flux) > snrThreshold).and(\ # specSNR.wave < (MAX(specSNR.wave)-avoid)).and(\ # specSNR.wave > (MIN(specSNR.wave)+avoid))).toInt1d() peaks = Int1d() #see if any peaks sit within an [fL:fR] range. 
for ff in peaksAll.toInt1d(): pp = specSNR.wave[ff] #look for +ve pL and -ve pR pL = pp - flags[specSNR.meta['channelName'].value]['fL'] pR = pp - flags[specSNR.meta['channelName'].value]['fR'] pos = pL.where((pL > 0) & (pR < 0)) if pos.length(): continue else: peaks.append(ff) peaks = Selection(peaks) nPeaks = peaks.length() freqPeaks = specSNR.wave[peaks] snrPeaks = specSNR.flux[peaks] #Sort the output by SNR if sign == 'pos': sortedIndices = REVERSE(SORT.BY_INDEX((snrPeaks))) else: sortedIndices = REVERSE(SORT.BY_INDEX(ABS(snrPeaks))) sortedFreq = freqPeaks[Selection(sortedIndices)] sortedSNR = snrPeaks[Selection(sortedIndices)] #snrPeaks = snrPeaks[Selection(sortedIndicies)] processed = Bool1d(nPeaks) featureFreq = [] featureSNR = [] #Loop over the peaks found and merge the fainter nearby peaks for i in xrange(nPeaks): if processed[i]: continue nearby = sortedFreq.where(ABS(sortedFreq - sortedFreq[i]) < mergeRadius) featureFreq.append(sortedFreq[i]) featureSNR.append(sortedSNR[i]) processed[nearby] = True #Now sort the output by frequency sortedIndices = SORT.BY_INDEX(featureFreq) featureFreq = list(Double1d(featureFreq)[Selection(sortedIndices)]) featureSNR = list(Double1d(featureSNR)[Selection(sortedIndices)]) return featureFreq, featureSNR def fitFeatures(specIn, residualFlag2, residualFlag3, newFeatures, newSnr, continuum, \ flags, sincModels, featuresFound, snrThresholdsResult, snrIterResult,\ fwhm=None,snrThreshold=3.0, flagValue=1, flagWidth=5.0, apod=False, fitter=None,\ snrCap=500, additMods=[], noiseFloor=0.0, avoid=None, limitValue=2.0, maxIter=None,\ sign=None, checkValue=2.0): """ Adds new features to the total model and fits to all features Inputs: - specIn: spectrum to be fitted - newFeatures: position of new features to add to model [GHz] - newSnr: SNR of new features - continuum: fitted continuum (array) - flags: pyDictionary containing the found feature flagging - sincModles: a list of all sinc models already fitted - fwhm: FWHM of sinc 
profiles [GHz]. If None then actualResolution is used - snrThreshold: minimum fitted SNR for features - flagValue: value for flagging at the region on fitted features > snrThreshold - flagWidth: HALF the width of region flagged for features found [GHz] - apod: set to True if specIn is an apodized spectrum - fitter: existing fitter to add the new features to (or to create if None) - snrCap: maximum SNR allowed when calculating the initial peak guess - additMods: fitted models, which may be updated by checkFit - noiseFloor: used for calculating a dynamic flag width - avoid: avoid width at the edge of the bands [GHz] - limitValue: to restrict the initial feature position during the global fit - maxIter: maximum fitting iterations allowed - sign: indicates +ve or -ve features to keep (use 'pos' or 'neg') - checkValue: if a feature has moved greater than this, remove it Returns: - specOut: residual spectrum with flagging updated for new features - fitter: instance of the spectrumFitter used to fit old+new features and polynomial - newFeaturesResult: new fitted feature frequencies > snrThreshold [GHz] - newSnrResult: new fitted feature SNRs - newPeakResult: new fitted peaks [flux units] - newModelResult: new fitted models - newErrResult: error on new fitted feature frequencies [GHz] - continuumResult: continuum value at position of new fitted features [flux units] - additMods: fitted models, which may be updated by checkFit - flags: pyDictionary containing the found feature flagging - alreadyFoundFeatures: updated freqencies of found features """ alreadyFoundFeatures = updateFeatureFrequencies(sincModels) if avoid == None: avoid = 10.0 #Choose the fitting engine: 1 = L-M, 2 = Amoeba, 3 = Linear # 4 = MP, 5 = Conjugated-Grad engine = 1 N = len(newFeatures) newModels = [None] * N newFeaturesResult ,newErrResult, newSnrResult = [],[],[] newPeakResult, newModelResult ,newContinuumResult = [],[],[] fL = flags[specIn.meta['channelName'].value]['fL'] fR = 
flags[specIn.meta['channelName'].value]['fR'] if fwhm == None: #deltaSigma = specIn.meta['actualResolution'].value deltaSigma = 1.18448225 else: deltaSigma = fwhm / 1.20671 #Select the fitter if on 1st iteration if fitter == None: fitter = SpectrumFitter(specIn, False) fitter.useFitter(engine) #set the fitting tolerance if specIn['flux'].unit.name == "Jy": tolerance = 1.0E-10 else: tolerance = 1.0E-40 fitter.setTolerance(tolerance) #Set the maximum number of iterations if maxIter != None: fitter.setMaxIter(maxIter) #Loop over the inputted features to add to the total model if apod: p2 = 1.5*1.20671*deltaSigma/(2.0*SQRT(2.0*LOG(2.0))) else: p2 = deltaSigma / PI for i in xrange(N): if newSnr[i] <= 500: p0 = newSnr[i] * specIn['error'].data[i] else: p0 = snrCap * specIn['error'].data[i] p1 = newFeatures[i] params = [p0, p1, p2] if apod: newModels[i] = fitter.addModel('gauss', params) else: newModels[i] = fitter.addModel('sinc', params) newModels[i].setFixed([2]) #limit the feature positions, to stop them wander off and even falling out #of the frequency bands limitFeature = None extraConstraints = False if extraConstraints: if limitValue != None: newModels[i].setLimits(1,p1-limitValue,p1+limitValue) limitFeature = True # #limit the feature potions to stop them wandering into flagged regions, # #if no nearby flagged regions in one direction, make sure to avoid the # #avoid region # #find the nearest fR on the left, or use avoid regions # ###NEW BIT limit only those features near to a flagged zone, withing the flagWidth? 
# limitsApplied = False # limitOnLimit = 8.0 # if limitFeature != None: # if limitFeature: # limitsApplied = True # leftLimit = MIN(specIn.wave)+avoid # if fR.size > 0: # try: # posLeftLimit = fR.where(min(iiii for iiii in (p1-fR) if iiii > 0) == (p1-fR)) # if p1-fR[posLeftLimit][0] > limitOnLimit: # leftLimit = fR[posLeftLimit][0] # #print 'left limit applied' # except: # pass # #find nearest fL on the right, or use avoid # rightLimit = MAX(specIn.wave)-avoid # if fL.size > 0: # try: # posRightLimit = fL.where(min(iiii for iiii in (fL-p1) if iiii > 0) == (fL-p1)) # if fL[posRightLimit][0]-p1 > limitOnLimit: # rightLimit = fL[posRightLimit][0] # #print 'right limit applied' # except: # pass # newModels[i].setLimits(1,leftLimit,rightLimit) # #Perform the fit # removeLimits = 0 try: fitter.doGlobalFit() except: # #Try removing the limits, if set # if limitsApplied: # for mgo in range(len(newModels)): # newModels[mgo].setLimits(1,0,0) # try: # fitter.doGlobalFit() # removeLimits = 1 # except: # for mgo in range(len(newModels)): # m = newModels[mgo] # fitter.removeModel(m) # featuresOut = updateFeatureFrequencies(sincModels) # return specIn, fitter, newFeaturesResult, newSnrResult, newPeakResult, newModelResult, \ # newErrResult, newContinuumResult, additMods,\ # flags, featuresOut, residualFlag2, residualFlag3,\ # removeLimits # else: for mgo in range(len(newModels)): m = newModels[mgo] fitter.removeModel(m) featuresOut = updateFeatureFrequencies(sincModels) return specIn, fitter, newFeaturesResult, newSnrResult, newPeakResult, newModelResult, \ newErrResult, newContinuumResult, additMods,\ flags, featuresOut, residualFlag2, residualFlag3 fitter.residual() #Find the total model and residual totalModel = fitter.getTotalModel() residual = fitter.getResidual() #Loop over the fitted newModels to get the fitted parameters #the fitting error and the SNR using the fitted sinc and the error column for mgo in range(len(newModels)): m = newModels[mgo] ff = newFeatures[mgo] #get 
the fitted parameters params = m.getFittedParameters() p0 = params[0] p1 = params[1] #get the error on the fitted parameters parametersStdDev = m.getFittedStdDev() measuredFreqErr = parametersStdDev[1] #create a sinc using the fitted parameters sinc = SincModel() sinc.setParameters(params) sinc = sinc(specIn.wave) #find the snr snrFull = sinc/specIn['error'].data pos = specIn.wave.where(ABS(sinc) == MAX(ABS(sinc))) snr = snrFull[pos] #get the continuum value at the same position contVal = continuum[pos] ###Remove any feature that isn't the right sign if sign == 'pos': #raise ValueError('sign:%s, snr:%s'%(sign,snr)) if snr[0] < 0.: fitter.removeModel(m) continue if sign == 'neg': #raise ValueError('sign:%s, snr:%s'%(sign,snr)) if snr[0] > 0.: fitter.removeModel(m) continue ###Remove any feature that has snr < 500 if sign == 'neg': if snr[0] < -500: fitter.removeModel(m) ###Remove any feature that has wandered well away if ABS(p1-ff) > checkValue: fitter.removeModel(m) ###Remove any feature model with SNR < snrThreshold if extraConstraints: if ABS(snr) >= snrThreshold: ###Only keep features inside of the avoid regions if (p1 > (MIN(specIn.wave)+avoid)) & (p1 < (MAX(specIn.wave)-avoid)): ###Only keep features not wandered too far if ABS(p1-ff) < checkValue: newFeaturesResult.append(p1) newErrResult.append(measuredFreqErr) newSnrResult.append(snr) newPeakResult.append(p0) newModelResult.append(m) newContinuumResult.append(contVal) else: fitter.removeModel(m) else: fitter.removeModel(m) else: fitter.removeModel(m) else: if ABS(snr) >= snrThreshold: ###Only keep features inside of the avoid regions ####Note that now all features will be inside of the avoid region ####so really don't need this check if (p1 > (MIN(specIn.wave)+avoid)) & (p1 < (MAX(specIn.wave)-avoid)): ###Only keep features at a reasonable distance from the already accepted ###This cuts down on doubles+ for partially resolved lines if len(alreadyFoundFeatures) > 0: min = 
MIN(ABS(p1-Double1d(alreadyFoundFeatures))) pos = Double1d(alreadyFoundFeatures).where(ABS(p1-Double1d(alreadyFoundFeatures)) == MIN(ABS(p1-Double1d(alreadyFoundFeatures)))) #pos = Double1d(alreadyFoundFeatures).where(MIN(ABS(p1-Double1d(alreadyFoundFeatures))) <= 1.2) #if pos.toInt1d().size > 0: if ABS(p1-Double1d(alreadyFoundFeatures)[pos][0]) <=1.2: #find the feature moveSinc = sincModels[pos.toInt1d()[0]] #set the position back to the one in alreadyFoundFeatures moveParams = moveSinc.getFittedParameters() moveParams[1] = alreadyFoundFeatures[pos.toInt1d()[0]] moveSinc.setParameters(list(moveParams)) #And now try fixing the position moveSinc.setFixed([1]) continue newFeaturesResult.append(p1) newErrResult.append(measuredFreqErr) newSnrResult.append(snr) newPeakResult.append(p0) newModelResult.append(m) newContinuumResult.append(contVal) #now the model is accepted, limit its position m.setLimits(1,p1-limitValue,p1+limitValue) else: fitter.removeModel(m) else: fitter.removeModel(m) #Now repeat the fit with the vetted features fitter.doGlobalFit() fitter.residual() #Find the total model and residual totalModel = fitter.getTotalModel() fitter.residual() #Find the total model and residual totalModel = fitter.getTotalModel() residual = fitter.getResidual() #Update the fitted frequencies of already found features alreadyFoundFeatures = updateFeatureFrequencies(sincModels) #now update the flagging flags = flagSpec1d(specIn, alreadyFoundFeatures, flags.copy(), flagWidth, recreate = True) #Remove limits for kept features? 
    #Outputs
    specOut = specIn.copy()
    specOut.flux = residual
    #Flag the band edges of the residual-flag spectra (within 'avoid' GHz of each end)
    near = residualFlag2.wave.where((residualFlag2.wave < (MIN(residualFlag2.wave)+avoid)) | \
        (residualFlag2.wave > (MAX(residualFlag2.wave)-avoid)))
    residualFlag2.flag[near] = 1
    residualFlag3.flag[near] = 1
    #use flagWidth to flag the region round the features found
    if flagValue != None:
        for fq in newFeaturesResult:
            residualFlag2.flag[residualFlag2.wave.where(ABS(residualFlag2.wave - fq) <\
                flagWidth)] = flagValue
    #flags[specIn.meta['channelName'].value]['p1'].append(Double1d(newFeaturesResult))
    #flags[specIn.meta['channelName'].value]['fL'].append(Double1d(newFeaturesResult)-flagWidth)
    #flags[specIn.meta['channelName'].value]['fR'].append(Double1d(newFeaturesResult)+flagWidth)
    flags = flagSpec1d(specIn,newFeaturesResult,flags,flagWidth)
    return specOut, fitter, newFeaturesResult, newSnrResult, newPeakResult, newModelResult, \
        newErrResult, newContinuumResult, additMods,\
        flags, alreadyFoundFeatures, residualFlag2, residualFlag3

#main function to run the feature finding show
def catalogueFeatures(specIn, snrThresholds, signs, jumpThreshold, array,flags,\
    mergeRadius=20.0, resampleResolution=5.0, fwhm=None, flagWidths=None, polyOrder=3,\
    apod=False, snrCut=3.0, snrRange=[4.0,25.0], snrCap=500, subtractBaseline=True,\
    baselineOrder=3, checkCutResults=False, avoid=None, testGofFcut=False,\
    testGofFcutLines=False, limitValue=2.0, maxIter=None, checkValue=2.0):
    """
    Iteratively finds features with SNRs above the respective snrThreshold.
    The final SNR are taken from the final fitted peaks and local RMS of the
    full residual.
    Inputs:
    - specIn: spectrum for finding features
    - snrThresholds: successive SNR limits for finding features
    - signs: list specifying feature signs desired per SNR threshold
    - jumpThreshold: minimum size of jump to indicate findJumps should weight
      and flag a region [STDDEV]
    - array: detector array name; only the first three characters ('SLW' or
      'SSW') are used, to select the wing-removal window
    - flags: flag bookkeeping structure passed through to fitFeatures/flagSpec1d
      (structure defined elsewhere -- TODO confirm)
    - mergeRadius: width within which features are merged by listFeatures [GHz]
    - resampleResolution: resolution used by findJumps for resampling [GHz]
    - fwhm: FWHM of sinc profiles [GHz]. If None then actualResolution is used
    - flagWidths: list of half widths used by fitFeatures to flag features found [GHz]
    - polyOrder: degree of polynomial fitted to the continuum
    - apod: set to True if specIn is an apodized spectrum
    - snrCut: final SNR cut for features found
    - snrRange: range either side of feature for final noise estimate [GHz]
    - snrCap: maximum SNR allowed when calculating the initial peak guess
    - subtractBaseline: set for an extra baseline subtraction when taking for the final SNR
    - baselineOrder: order of poly fitted for subtractBaseline
    - checkCutResults: set to a snrThreshold to stop fitting at that point
    - avoid: width of the band ends to exclude from the finding process [GHz]
    - testGofFcut: if set to True, GoF is run per snrThreshold
    - testGofFcutLines: testing GofF to remove features per SNR threshold
    - limitValue: limits the feature position during the fit
    - maxIter: maximum number of fit iterations, passed through to fitFeatures
    - checkValue: if a feature has moved more than this, it is removed after the test fit
    Returns:
    - featuresFound: fitted frequencies of features found [GHz]
    - snrResult: final SNR of features (fitted peak/local noise)
    - snrIterResult: initial SNR of features, determined when found (peak/errorColumn)
    - cutFeaturesFound: fitted frequencies of features found > snrCut [GHz]
    - cutSnrResult: final SNR of features > snrCut (fitted peak/local noise)
    - cutSnrIterResult: initial SNR of features > snrCut, determined when found (peak/errorColumn)
    - snrThresholdsResult: snrThresholds at which features were found
    - cutSnrThresholdsResult: snrThresholds at which features > snrCut were found
    - finalTotalModel: final total fitted model (array)
    - finalResidual: final residual (array)
    - fitter: instance of SpectrumFitter used to fit all the models
    - continuumModel: polynominal model fitted to the continuum
    - sincModels: final sinc models fitted to the features
    - cutSincModels: final sinc models fitted to the features > snrCut
    - featuresFoundErr: error on fitted feature frequencies
    - cutFeaturesFoundErr: error on fitted feature frequencies for features > snrCut
    - initialContinuumArray: initial polynominal fit to the continuum (array)
    - peaksAll: fitted peaks
    - cutPeaks: fitted peaks for features > snrCut
    - cutContinuumValues: corresponding continuum values to fitted peaks for features > snrCut
    - outputInitialFeatureFreq: initial fitted feature frequencies
    - cutOutputInitialFeatureFreq: initial fitted feature frequencies for features > snrCut
    - indexFeatures: feature number (used to create unique feature names per obsid)
    - cutIndexFeatures: feature number (used to create unique feature names
      per obsid, for features > snrCut)
    - threshSincs: sincs fitted at each snrThreshold cut (list of arrays)
    - threshResiduals: residual for each snrThreshold cut (list of arrays)
    - threshTotalModels: total model for each snrThreshold cut (list of arrays)
    - allGofF: GoF for the features found per snrThreshold
    - allFeaturesForGofF: corresponding features for allGofF
    - allSnrForGofF: corresponding SNRs for allGofF
    """
    #If avoid is set, flag the ends of the input spectrum
    if avoid:
        near = specIn.wave.where((specIn.wave < (MIN(specIn.wave)+avoid)) | \
            (specIn.wave > (MAX(specIn.wave)-avoid)))
        specIn.flag[near] = 1
    #Preliminary detection of bright peaks, to flag before running the initial
    #continuum fit *NOTE THAT NO ACTUAL FLAGGING TAKES PLACE*
    resampledSpec, weightedSpec = findJumps(specIn.copy(), jumpThreshold,\
        resampleResolution=resampleResolution, \
        weight=True, flag=False, equalWeight=apod)
    #Initial continuum fit
    #specOut is continuum subtracted and used in the first feature finder loop
    specOut, fitter, continuumModel, initialContinuumArray = removeContinuum(weightedSpec,\
        polyOrder)
    continuumArray = initialContinuumArray
    noiseFloor = MEAN(ABS(specOut.flux))*1.0
    if checkCutResults:
        cutResiduals = Double1d()
        cutTotalModels = Double1d()
    #Number of snrThresholds to iterate over
    N = len(snrThresholds)
    if flagWidths == None:
        flagWidths = [0.0] * N
    #Set up the outputs
    threshSincs, threshResiduals ,threshTotalModels = [],[],[]
    featuresFound, outputInitialFeatureFreq, featuresFoundErr, snrResult = [],[],[],[]
    continuumPeakResult, continuumResult = [],[]
    additMods = []
    #Keep track of the SNR calculated during the iterations
    snrIterResult = []
    #Keep track of what iteration the feature was found in
    snrThresholdsResult = []
    #Keep track of the flagwidth used per feature
    flagWidthsAll = []
    peaksAll ,sincModels ,cutSincModels = [],[],[]
    cutFeaturesFound ,cutFeaturesFoundErr = Double1d(), Double1d()
    cutSnrResult, cutSnrIterResult = Double1d(), Double1d()
    cutSnrResult2, cutSnrResult3 = Double1d(), Double1d()
    cutContinuumPeakValues ,cutContinuumValues, cutPeaks = Double1d(), Double1d(), Double1d()
    cutOutputInitialFeatureFreq, cutSnrThresholdsResult = Double1d(), Double1d()
    #Keep a list of the gOfF per threshold, along with the features and initial SNRs
    allGofF, allFeaturesForGofF, allSnrForGofF = [],[],[]
    ###Remove the fitting weights used for the initial continuum fit
    fitter.setWeight(None)
    #Working copies of the input spectrum used for the alternative SNR estimates
    residualSpec2 = specIn.copy()
    residualSpec3 = specIn.copy()
    residualSpec2Out = specIn.copy()
    residualSpec3Out = specIn.copy()
    ###Search for features per snrThreshold
    for i in xrange(N):
        ###find the SNR using the error column
        snr = specOut.copy()
        snr.setFlux(snr.flux/snr['error'].data)
        ###find new features above the snrThreshold
        newFeatures, newSnr = listFeatures(snr, snrThresholds[i], flags, mergeRadius=mergeRadius, sign=signs[i])
        ###Fit the features *newFeatures (list of new features) is added to the existing features*
        #fitFeatures would need amending to fit only the new features
        #to the residual from the previous iteration.
        # NOTE(review): 'residualSpec2out'/'residualSpec3out' (lower-case 'o') do not
        # match 'residualSpec2Out'/'residualSpec3Out' used by the setFlag calls below,
        # so the flag spectra returned by fitFeatures are silently discarded -- confirm
        # whether the capital-O names were intended here.
        specOut, fitter, newFeatures, newSnr, newPeaks, newModels, newFeaturesErr, \
            continuumValues, additMods, flags, featuresFound,residualSpec2out, residualSpec3out,\
            = fitFeatures(specIn,residualSpec2, residualSpec3, newFeatures, newSnr, continuumArray, \
            flags, sincModels, featuresFound, snrThresholdsResult, snrIterResult, \
            fwhm=fwhm, apod=apod, flagWidth=flagWidths[i], \
            snrThreshold=snrThresholds[i],fitter=fitter, snrCap=snrCap,\
            additMods=additMods,noiseFloor=noiseFloor,\
            avoid=avoid,limitValue=limitValue,maxIter=maxIter,sign=signs[i])
        #Update the continuum
        paramC = continuumModel.getFittedParameters()
        paramErrorC = continuumModel.getFittedStdDev()
        polyC = PolynomialModel(polyOrder)
        polyC.setParameters(paramC)
        continuumArray = polyC(specIn.wave)
        #Keep track of at which threshold the feature was found in
        thresholds = [snrThresholds[i]] * len(newFeatures)
        #Keep track of the flagWidths used for each feature
        widths = [flagWidths[i]] * len(newFeatures)
        #update flagging
        specIn.setFlag(specOut.flag)
        residualSpec2.setFlag(residualSpec2Out.flag)
        residualSpec3.setFlag(residualSpec3Out.flag)
        #get the total model and residual as arrays
        tempTotalMod = fitter.getTotalModel()
        tempResidual = fitter.getResidual()
        if checkCutResults:
            cutTotalModels.append(tempTotalMod)
            cutResiduals.append(tempResidual)
        #Add newFeatures and newPeaks to the previous lists
        featuresFound += newFeatures
        outputInitialFeatureFreq += newFeatures
        featuresFoundErr += newFeaturesErr
        continuumPeakResult += (continuumValues + newPeaks)
        continuumResult +=continuumValues
        peaksAll += newPeaks
        snrIterResult += newSnr
        snrThresholdsResult += thresholds
        flagWidthsAll += widths
        sincModels += newModels
        threshResiduals.append(tempResidual)
        threshTotalModels.append(tempTotalMod)
        ###go through all the models and get the sincs
        sincResult = []
        for mm in sincModels:
            params = mm.getFittedParameters()
            sinc = SincModel()
            sinc.setParameters(params)
            sinc = sinc(specIn.wave)
            sincResult+=sinc
        threshSincs.append(sincResult)
        #Get gOfF for each snrThreshold
        if testGofFcut:
            gOfFmidCut = gOfF(featuresFound, specIn, sincModels, rng=gOfFRng)
            #what about removing bad looking features for each cut?
            if testGofFcutLines:
                # NOTE(review): this is a bare tuple expression with no effect --
                # an assignment or function call appears to have been lost here;
                # confirm against the original Feature Finder source.
                gOfFmidCut,featuresFound,featuresFoundErr,continuumPeakResult,\
                    continuumResult,peaksAll,snrIterResult,snrThresholdsResult,\
                    flagWidthsAll,sincModels
            allGofF.append(gOfFmidCut)
            allFeaturesForGofF.append(featuresFound)
            allSnrForGofF.append(snrIterResult)
        #Early exit for diagnostics: stop after the requested snrThreshold,
        #padding the unfilled return slots with Double1d([0]) placeholders
        if checkCutResults == snrThresholds[i]:
            return featuresFound, snrResult, snrIterResult, cutTotalModels, \
                cutResiduals, Double1d([0]), snrThresholdsResult, Double1d([0]), \
                tempTotalMod, tempResidual, fitter, continuumModel, sincModels, \
                Double1d([0]), featuresFoundErr, Double1d([0]), \
                Double1d([0]), Double1d([0]), Double1d([0]), \
                Double1d([0]), Double1d([0]), Double1d([0]), \
                Double1d([0]), Double1d([0]),\
                threshSincs,threshResiduals,threshTotalModels,\
                allGofF, allFeaturesForGofF, allSnrForGofF
    ###UPDATE FLAGWIDTHS
    #1. set flag to None
    #2. reflag avoid
    #3. use models to adjust the flagging per feature
    # checkFlag = specIn.flag.copy()
    # #specIn.setFlag(None)
    # specIn.setFlag(Int1d(specIn.flag.size))
    # if avoid:
    #     near = specIn.wave.where((specIn.wave < (MIN(specIn.wave)+avoid)) | \
    #         (specIn.wave > (MAX(specIn.wave)-avoid)))
    #     specIn.flag[near] = 1
    # for gogo in range(len(featuresFound)):
    #     modPara = sincModels[gogo].getFittedParameters()
    #     feature = modPara[1]
    #     specIn.flag[specIn.wave.where(ABS(specIn.wave - modPara[1]) < flagWidthsAll[gogo])] = 1
    #Get the total fitted model and the associated residual
    finalTotalModel = fitter.getTotalModel()
    finalResidual = fitter.getResidual()
    #Put the residual into a spectral container
    residualSpec = specIn.copy()
    residualSpec.flux = finalResidual
    residualSpec2.flux = finalResidual
    residualSpec3.flux = finalResidual
    #residualSpec2 = residualSpec.copy()
    ###Use the final fitted feature positions to update featuresFound
    #(outputInitialFeatureFreq gives the initial fitted feature position)
    #updateFeaturesFound = []
    #for gogo in range(len(featuresFound)):
    #    modPara = sincModels[gogo].getFittedParameters()
    #    updateFeaturesFound.append(modPara[1])
    #Now set an alternative .flag using the final features, for the final SNR estimate
    for gogo in range(len(featuresFound)):
        modPara = sincModels[gogo].getFittedParameters()
        feature = modPara[1]
        residualSpec3.flag[residualSpec3.wave.where(ABS(residualSpec3.wave - feature) < snrRange[0])] = 1
    #outputInitialFeatureFreq = featuresFound
    #featuresFound = updateFeaturesFound
    ###Index the features
    indexFeatures = String1d(range(len(featuresFound)))
    cutIndexFeatures = String1d()
    ###For each feature find the SNR using the full residual and fitted peaks
    # NOTE(review): 'avoid' may still be None here (its default), which would make
    # MIN(specIn.wave)+avoid fail -- callers appear to always pass a value; confirm.
    for gogo in range(len(featuresFound)):
        feature = featuresFound[gogo]
        if (feature > (MIN(specIn.wave)+avoid)) & (feature < (MAX(specIn.wave)-avoid)):
            featureInd = indexFeatures[gogo]
            err = featuresFoundErr[gogo]
            peak = peaksAll[gogo]
            snrIter = snrIterResult[gogo]
            threshold = snrThresholdsResult[gogo]
            continuumPeak = continuumPeakResult[gogo]
            continuumValue = continuumResult[gogo]
            singleMod = sincModels[gogo]
            initialFreq = outputInitialFeatureFreq[gogo]
            #find the final SNR using the fitted peak and local RMS of the full residual
            #no .flag
            snr = findResidualSnr(residualSpec, feature, peak, snrRange=snrRange, \
                ignoreFlag=1, subtractBaseline=True, \
                baselineOrder=baselineOrder)
            #Get the SNR using the old .flag
            snr2 = findResidualSnr(residualSpec2, feature, peak,snrRange = snrRange,\
                ignoreFlag=1, subtractBaseline=True, \
                baselineOrder=baselineOrder)
            #Get the SNR using the snrRange .flag
            snr3 = findResidualSnr(residualSpec3, feature, peak,snrRange = snrRange,\
                ignoreFlag=1, subtractBaseline=True, \
                baselineOrder=baselineOrder)
            #Keep the feature if it is above the SNR limit
            if ABS(snr3) >= snrCut:
                #for features with SNR > 10 remove features found that are
                #within 1.5-2.1 GHz to the right (asymmetry wing fit removal)
                #The lesser feature should have << SNR compared to the main feature
                if array[:3] == 'SLW':
                    pos = Double1d(featuresFound).where(((feature-Double1d(featuresFound)) >= 1.5).\
                        and((feature-Double1d(featuresFound)) <= 2.1))
                elif array[:3] == 'SSW':
                    pos = Double1d(featuresFound).where(((feature-Double1d(featuresFound)) > 0.0).\
                        and((feature-Double1d(featuresFound)) <= 2.1))
                if pos.length():
                    if len(pos.toInt1d()) > 1:
                        snrCheck = Double1d()
                        for fe in range(pos.toInt1d().size):
                            fefe = featuresFound[pos.toInt1d()[fe]]
                            pepe = peaksAll[pos.toInt1d()[fe]]
                            snr3temp = findResidualSnr(residualSpec3, fefe, pepe,snrRange = snrRange,\
                                ignoreFlag=1, subtractBaseline=True, \
                                baselineOrder=baselineOrder)
                            snrCheck.append(snr3temp)
                        #find the strongest peak
                        pos = snrCheck.where(snrCheck == MAX(snrCheck))
                        maxSnr = snrCheck[pos]
                    else:
                        maxSnr = findResidualSnr(residualSpec3, Double1d(featuresFound)[pos][0], Double1d(peaksAll)[pos][0],snrRange = snrRange,\
                            ignoreFlag=1, subtractBaseline=True, \
                            baselineOrder=baselineOrder)
                    if maxSnr > snr3:
                        if maxSnr > 10:
                            if (maxSnr/snr3 > 4): #& (maxSnr/snr3 < 10):
                                continue
                ###Now similar for -ve features on the left of a strong line
                #for features with SNR > 10 remove features found that are
                #within 1.5-2.1 GHz to the right (asymmetry wing fit removal)
                #The lesser feature should have << SNR compared to the main feature
                pos = Double1d(featuresFound).where(((feature-Double1d(featuresFound)) < 0.0).\
                    and((feature-Double1d(featuresFound)) >= -2.5))
                if pos.length():
                    if len(pos.toInt1d()) > 1:
                        snrCheck = Double1d()
                        for fe in range(pos.toInt1d().size):
                            fefe = featuresFound[pos.toInt1d()[fe]]
                            pepe = peaksAll[pos.toInt1d()[fe]]
                            snr3temp = findResidualSnr(residualSpec3, fefe, pepe,snrRange = snrRange,\
                                ignoreFlag=1, subtractBaseline=True, \
                                baselineOrder=baselineOrder)
                            snrCheck.append(snr3temp)
                        #find the strongest peak
                        pos = snrCheck.where(ABS(snrCheck) == MAX(ABS(snrCheck)))
                        maxSnr = snrCheck[pos]
                    else:
                        maxSnr = findResidualSnr(residualSpec3, Double1d(featuresFound)[pos][0], Double1d(peaksAll)[pos][0],snrRange = snrRange,\
                            ignoreFlag=1, subtractBaseline=True, \
                            baselineOrder=baselineOrder)
                    if maxSnr > ABS(snr3):
                        if maxSnr > 10:
                            if (ABS(maxSnr/snr3) > 4):
                                continue
                #Feature accepted: record all of its per-feature outputs
                cutFeaturesFound.append(feature)
                cutIndexFeatures.append(featureInd)
                cutFeaturesFoundErr.append(err)
                cutSnrResult.append(snr)
                cutSnrResult2.append(snr2)
                cutSnrResult3.append(snr3)
                cutSnrIterResult.append(snrIter)
                cutSnrThresholdsResult.append(threshold)
                cutPeaks.append(peak)
                cutContinuumPeakValues.append(continuumPeak)
                cutContinuumValues.append(continuumValue)
                cutSincModels.append(singleMod)
                cutOutputInitialFeatureFreq.append(initialFreq)
            snrResult += [snr]
    return featuresFound, snrResult, snrIterResult, cutFeaturesFound, cutSnrResult, \
        cutSnrIterResult, snrThresholdsResult, cutSnrThresholdsResult, \
        finalTotalModel, finalResidual, fitter, continuumModel, sincModels, \
        cutSincModels, featuresFoundErr, cutFeaturesFoundErr,\
        initialContinuumArray, peaksAll, cutPeaks, \
        cutContinuumValues, outputInitialFeatureFreq, cutOutputInitialFeatureFreq, \
        indexFeatures, cutIndexFeatures,\
        threshSincs,threshResiduals,threshTotalModels,\
        allGofF, allFeaturesForGofF, allSnrForGofF,\
        flags, cutSnrResult2, cutSnrResult3,\
        residualSpec,residualSpec2,residualSpec3

### Fits File Convenience Stuff ###
# The following class is stolen directly from HICLASS
class MyFitsDictionary(AbstractFitsDictionary):
    """Dictionary to use with FitsArchive to get proper keywords.

    Because HCSS can use metadata parameters with fancy names and FITS is
    stuck with keywords of 8 uppercase ASCII characters, a dictionary is
    needed to convert the meta data parameter names into FITS keywords.

    The present class defines a dictionary for which the HCSS name and the
    FITS name of a parameter are identical. This allows you to populate a
    HCSS dataset using the keywords you will want to see appear in the FITS
    file. And they will be used.

    When instanciating this dictionary, feed to the constructor a product
    created by HiClass. It will be scanned and all the meta data parameters
    found in its datasets will be added to the dictionary.
    Say you want to export a product p:

    >>> dico = MyFitsDictionary(p)
    >>> archive = FitsArchive()
    >>> archive.rules.append(dico)
    >>> archive.save(sFileName, p)
    """
    def __init__(self, p):
        """p: HiClass product."""
        AbstractFitsDictionary.__init__(self)
        self._addKeysForProduct(p)

    def _addKeysForProduct(self, prod):
        #Scan every dataset in the product for metadata parameters
        map(self._addKeysForDataset, map(prod.get, prod.keySet()))

    def _addKeysForDataset(self, ds):
        #Identity mapping: HCSS name and FITS keyword are the same
        for meta_name in ds.meta.keySet():
            self.set(meta_name, meta_name)

#Mapping from Feature Finder metadata parameter names to 8-character FITS keywords
metaMapper = {'obsid':'OBS_ID',\
              'object':'OBJECT',\
              'raNominal':'RA_NOM',\
              'decNominal':'DEC_NOM',\
              'mapSampling':'MAPSAMPL',\
              'resolution':'COMBRES',\
              'biasMode':'BIASMODE',\
              'flagGood':'FLAG_G_G',\
              'flagGoodNoisy':'FLAG_G_N',\
              'flagPoor':'FLAG_B_G',\
              'flagPoorNoisy':'FLAG_B_N',\
              'operationalDay':'ODNUMBER',\
              'minimumSnr':'MIN_SNR',\
              'edgeMaskWidth':'EDGEMASK',\
              'maxContinuum':'MAX_CONT',\
              'nFeaturesSlw':'N_SLW',\
              'nFeaturesSsw':'N_SSW',\
              'radVelEstimate':'RV',\
              'radVelErr':'RV_ERR',\
              'radVelFlag':'RV_FLAG',\
              'nMaxMatch':'N_MAXMAT',\
              'spatialExtent':'S_EXTENT',\
              'hpdp':'HPD_USED',\
              'calibration':'CAL_TYPE',\
              'fluxUnit':'FLUXUNIT',\
              'bgs':'BGS_USED',\
              'ciCheck':'CI_CHECK',\
              }

def writeFits(table, file, extraTables=None):
    """Writes fits files and correct for non-standard key words
    Inputs:
    - table: products table from main Feature Finder script.
    - file: path and filename of the output fits file.
    - extraTables: extra tables for mapping observations (velocity, etc.)
""" product = Product(description = 'Herschel SPIRE', \ instrument = 'SPIRE', creator = 'spectralFeatureFinder') product.type = 'Class formatted FITS file' product['data'] = table if extraTables is not None: for t in extraTables: product[t.description] = t keyDictionary=MyFitsDictionary(product) for meta_name in product.meta.keySet(): keyDictionary.set(meta_name, meta_name) for meta_name in metaMapper.keys(): keyDictionary.set(metaMapper[meta_name], meta_name) fits=FitsArchive() fits.rules.append(keyDictionary) fits.save(file, product) def updateProducts(nccProducts, freqDets, freqErrDets, snrDets3, detDets, \ combinedFlags, nccFlag): """Updates products table by adding the ouptus of the neutral carbon check (NCC). Inputs: - nccProducts: output products from the neutral carbon check - freqDets: Detected frequencies. - freqErrDets: Detected frequency errors. - snrDets3: Associated snr values. - detDets: detector values. - combinedFlags: combined goodness of fit flags. - nccFlag: neutaral cabon flag indicating if line is detected using NCC. Outputs: - freqDets: updated version of the above variable. - freqErrDets: updated version of the above variable. - snrDets3: updated version of the above variable. - detDets: updated version of the above variable. - combinedFlags: updated version of the above variable. - nccFlag: updated version of the above variable. 
""" removeFreq, newFreq, newErr, newSnr = nccProducts ind = Selection(SORT.BY_INDEX(newFreq)) newFreq, newErr = Double1d(newFreq)[ind], Double1d(newErr)[ind] newSnr = Double1d(newSnr)[ind] if testGofFfinal: combinedFlags = list(combinedFlags) if len(newFreq) in [2, 3]: ind = Double1d(freqDets).where(removeFreq == Double1d(freqDets)).toInt1d()[0] freqDets[ind] = newFreq[-1] freqDets.insert(ind, newFreq[-2]) freqErrDets[ind] = newErr[-1] freqErrDets.insert(ind, newErr[-2]) snrDets3[ind] = newSnr[-1] snrDets3.insert(ind, newSnr[-2]) detDets[ind] = 'SLWC3' detDets.insert(ind, 'SLWC3') combinedFlags.insert(ind, combinedFlags[ind]) nccFlag[ind] = 1 nccFlag.insert(ind, 1) if len(newFreq) in [1, 3]: ind = Double1d(freqDets).where((newFreq[0] - Double1d(freqDets)) > 0.0).toInt1d() if len(ind) == 0: ind = 0 else: ind = ind[-1] + 1 freqDets.insert(ind, newFreq[0]) freqErrDets.insert(ind, newErr[0]) snrDets3.insert(ind, newSnr[0]) detDets.insert(ind, 'SLWC3') combinedFlags.insert(ind, 0.1) nccFlag.insert(ind, 1) if testGofFfinal: combinedFlags = Double1d(combinedFlags) return freqDets, freqErrDets, snrDets3, detDets, combinedFlags, nccFlag def rePlot(spec, continuum, detectors, freqDets, snrDets3, detDets, \ overlap, boxMinMax, avoid, seePostCards): """Generates new figures to include neutral carbon check modifications. 
    Inputs:
    - spec: input spectrum container
    - continuum: fitted continuum container
    - detectors: detectors list
    - freqDets: detected frequencies
    - snrDets3: associated feature signal-to-noise ratios
    - detDets: associated feature detectors
    - overlap: overlap region in SLW and SSW bands
    - boxMinMax: rectangle describing the limits of the figure
    - avoid: frequency of noise edge regions of SLW and SSW bands
    - seePostCards: if postcards should be displayed when generated
    Outputs:
    - plot2: plot object
    - plotTitle: title of plot
    """
    plotTitle=''
    plot2, boxMinMax = preparePostcardPlots(spec, avoid, seePostCards=seePostCards)
    for det_i, det in enumerate(detectors):
        specIn = spec[0][det]
        con1 = continuum[0][det].flux
        #Select the features belonging to this detector
        ind = String1d(detDets).where(String1d(detDets) == det)
        cutFeatures = Double1d(freqDets)[ind]
        cutSnrs3 = Double1d(snrDets3)[ind]
        cutPeaks, cutContinuumValues = [], []
        for freq in cutFeatures:
            #Nearest spectral bin to the feature frequency
            ind = specIn.wave.where(MIN(ABS(specIn.wave - freq)) == ABS(specIn.wave - freq))
            cutContinuumValues += con1[ind]
            cutPeaks += [specIn.flux[ind][0] - con1[ind][0]]
        #Postcard title carries the (SLW/SSW) feature counts
        if det == 'SLWC3':
            plotTitle+=' (%s/'%(cutFeatures.size)
        elif det == 'SSWD4':
            plotTitle+='%s)'%(cutFeatures.size)
        plot2 = postcardPlots(plot2, spec, det, det_i, cutFeatures, cutSnrs3, \
            cutPeaks, cutContinuumValues, overlap, boxMinMax)
        plot2.addLayer(LayerXY(specIn.wave,con1,color=Color.GREEN,stroke=1))
    return plot2, plotTitle

def fill(table,dem1,dem2,type='float'):
    """
    Fills a 2D table with nan values
    Inputs:
    - table: table object to be filled
    - dem1: number of rows
    - dem2: number of columns
    - type: data type ('str' or 'float')
    Outputs:
    - table: filled table object
    """
    #One Column per table column, pre-filled with NaN (or the string 'nan')
    if type == 'float':
        for i in range(dem2):
            table['%i'%i] = Column(Double1d(dem1,Double.NaN))
    elif type == 'str':
        for i in range(dem2):
            table['%i'%i] = Column(String1d(dem1,'nan'))
    else:
        print 'Fill Error'
    return table

def mappingPostProcessing(product, featureCatPerObsid, continuumTable, \
    estimateRedshift, testNeutralC, testGofFfinal,\
    maxVelocity=6000.0, velocityDelta=2000.0):
    """
    Implements the CO2/NII based radial velocity estimate and neutral carbon
    check for mapping observations.
    Inputs:
    - product: mapping observation spectral container.
    - featureCatPerObsid: results table from the main routine.
    - continuumTable: fitted continuum parameter table from main routine.
    - estimateRedshift: bool, if True, will estimate radial velocity
    - testNeutralC: bool, if True, will run the neutral carbon check.
    - testGofFfinal: bool, if True, will include final goodness of fit.
    - maxVelocity: maximum assumed velocity for the source.
    - velocityDelta: amount to increment maxVelocity by in order to capture
      higher velocity sources.
    Outputs:
    - table: updated `featureCatPerObsid`
    - output list: contains tables with SSW grid projected data
        - velTable: estimated radial velocity.
        - errTable: error associated with radial velocity estimate.
        - flagTable: flags representing estimate quality.
        - nLinesTable: number of lines present in combined spectra (SLW
          projected onto SSW grid)
        - detTable: detectors included for each pixel of the combined grid.
    """
    #######################################################################################
    #                              Velocity Estimate                                      #
    #######################################################################################
    if estimateRedshift:
        #Pixel scales (deg) for both cubes; RA scale corrected for cos(dec)
        cubeSLW = product[0]
        slwDra = ABS(cubeSLW['image'].meta['cdelt1'].value)
        slwDdec = ABS(cubeSLW['image'].meta['cdelt2'].value)
        cosd_SLW=Math.cos(Math.toRadians(cubeSLW['image'].meta['crval2'].value))
        slwDra /= cosd_SLW
        cubeSSW = product[1]
        sswDra = ABS(cubeSSW['image'].meta['cdelt1'].value)
        sswDdec = ABS(cubeSSW['image'].meta['cdelt2'].value)
        cosd_SSW=Math.cos(Math.toRadians(cubeSSW['image'].meta['crval2'].value))
        sswDra /= cosd_SSW
        table = featureCatPerObsid.copy()
        allFrequency = table['frequency'].data
        allFreqErr = table['frequencyError'].data
        allSNR = table['SNR'].data
        allRow = table['row'].data
        allColumn = table['column'].data
        allDets = table['array'].data
        slwInd = allDets.where(allDets == 'SLW')
        sswInd = allDets.where(allDets == 'SSW')
        sswDem1 = cubeSSW.dimensions[1] #ssw rows
        sswDem2 = cubeSSW.dimensions[2] #ssw columns
        slwDem1 = cubeSLW.dimensions[1] #slw rows
        slwDem2 = cubeSLW.dimensions[2] #slw columns
        # [rows, cols] -> [dec, ra]
        fullRA_ssw = Double2d(sswDem1, sswDem2)
        fullDec_ssw = Double2d(sswDem1, sswDem2)
        #Output Products
        #Add new columns
        tRows = len(allFrequency)
        refInd = Int1d().range(tRows)
        table['velocity'] = Column(Double1d(tRows,Double.NaN), Speed.KILOMETERS_PER_SECOND,'radial velocity')
        table['velocityError'] = Column(Double1d(tRows,Double.NaN), Speed.KILOMETERS_PER_SECOND, 'radial velocity error')
        table['vFlag'] = Column(String1d(tRows,''), None, 'radial velocity flag')
        #Add new tables
        #outProduct = Product(description = 'Herschel SPIRE', \
        #    instrument = 'SPIRE', \
        #    creator = 'spectralFeatureFinder')
        #outProduct.type = 'Class formatted FITS file'
        velTable = TableDataset(description = "velocity")
        velTable = fill(velTable,sswDem1,sswDem2)
        errTable = TableDataset(description = "velocityError")
        errTable = fill(errTable,sswDem1,sswDem2)
        flagTable = TableDataset(description = "vFlags")
        flagTable = fill(flagTable,sswDem1,sswDem2,type='str')
        nLinesTable = TableDataset(description = "nLines")
        nLinesTable = fill(nLinesTable,sswDem1,sswDem2)
        detTable = TableDataset(description = "arrays")
        detTable = fill(detTable,sswDem1,sswDem2,type='str')
        for r in range(sswDem2): #ssw columns
            for d in range(sswDem1): #ssw rows
                #get data from SLW and SSW matching ~same location on sky
                specIn = cubeSSW.getSpectrum1d(d,r)
                ra, dec = specIn.meta['ra'].value, specIn.meta['dec'].value
                fullRA_ssw[d,r] = ra
                fullDec_ssw[d,r] = dec
                #Angular distance from this SSW pixel to every SLW feature
                zLen = SQRT((table['ra'].data[slwInd] - ra)**2 + (table['dec'].data[slwInd] - dec)**2)
                sswI = table['ra'].data[sswInd].where((ABS(table['ra'].data[sswInd] - ra) < sswDra/100.0).and(ABS(table['dec'].data[sswInd] - dec) < sswDdec/100.0))
                #slwI = table['ra'].data[slwInd].where((zLen <= SQRT((slwDra/1.95)**2 + (slwDdec/1.95)**2)).and(MIN(ABS(zLen)) == ABS(zLen)))
                slwI = table['ra'].data[slwInd].where((zLen <= SQRT((slwDra)**2 + (slwDdec)**2)/2.0).and(MIN(ABS(zLen)) == ABS(zLen)))
                if (slwI.length()+sswI.length()) < 1: continue
                #Record which arrays contribute to this pixel ('L', 'S' or 'LS')
                dets = ''
                if slwI.length() > 0: dets = dets + 'L'
                if sswI.length() > 0: dets = dets + 'S'
                detTable[r].data[d] = dets
                #load data into input variables
                features, freqErr, snr, pixInd = Double1d(), Double1d(), Double1d(), []
                #ssw
                features.append(allFrequency[sswInd][sswI])
                freqErr.append(allFreqErr[sswInd][sswI])
                snr.append(allSNR[sswInd][sswI])
                pixInd.extend(refInd[sswInd][sswI])
                #slw
                features.append(allFrequency[slwInd][slwI])
                freqErr.append(allFreqErr[slwInd][slwI])
                snr.append(allSNR[slwInd][slwI])
                pixInd.extend(refInd[slwInd][slwI])
                nLinesTable[r].data[d] = len(allFrequency[sswInd][sswI]) + len(allFrequency[slwInd][slwI])
                #More lines are demanded when both arrays contribute
                if dets == 'LS':
                    N_min = 7
                else:
                    N_min = 3
                #Calculate Velocity
                velocity_new, velocityErr, zFlag, N, shiftFreq = \
                    redshiftFF(features,freqErr,snr,maxVelocity,velocityDelta,N_min)
                table['velocity'].data[Selection(pixInd)] = velocity_new
                table['velocityError'].data[Selection(pixInd)] = velocityErr
                table['vFlag'].data[Selection(pixInd)] = zFlag if zFlag == 'nan' else 'FF?'
                velTable[r].data[d] = velocity_new
                errTable[r].data[d] = velocityErr
                flagTable[r].data[d] = '%i,%s'%(N,zFlag)
        # SLW rows have velocity from last SLW/SSW pixel pair analysed. Use nearest non-Nan SSW velocity instead
        for r in range(slwDem2): #slw columns
            for d in range(slwDem1): #slw rows
                specIn = cubeSLW.getSpectrum1d(d,r)
                ra, dec = specIn.meta['ra'].value, specIn.meta['dec'].value
                #Distance from this SLW pixel to every SSW pixel centre
                zLen = SQRT((fullRA_ssw - ra)**2 + (fullDec_ssw - dec)**2)
                test_ind = zLen.where((zLen <= SQRT((slwDra)**2 + (slwDdec)**2)/2.0))
                slwI = table['ra'].data[slwInd].where((ABS(table['ra'].data[slwInd] - ra) < slwDra/100.0).and(ABS(table['dec'].data[slwInd] - dec) < slwDdec/100.0))
                # Candidate SSW pixels
                #sswI = table['ra'].data[sswInd].where((zLen <= SQRT((slwDra)**2 + (slwDdec)**2)/2.0))
                if test_ind.length() < 1: continue
                #Walk the candidate SSW pixels from nearest outward until a non-NaN velocity is found
                zLen_sub = zLen[test_ind]
                sorted_ind = Selection(SORT.BY_INDEX(zLen_sub))
                cols_sub = (test_ind.toInt1d()%sswDem2)[sorted_ind]
                rows_sub = (test_ind.toInt1d()/sswDem2)[sorted_ind]
                vel, velErr, velFlag = Double.NaN, Double.NaN, 'nan'
                for col, row in zip(cols_sub, rows_sub):
                    vel = velTable[col].data[row]
                    if Double.isNaN(vel): continue
                    velErr = errTable[col].data[row]
                    velFlag = 'FF?'
                    break
                table['velocity'].data[slwI] = vel
                table['velocityError'].data[slwI] = velErr
                table['vFlag'].data[slwI] = velFlag
    ########################################################################################
    #                          Neutral Carbon Detection                                    #
    ########################################################################################
    # NOTE(review): this section reuses cubeSLW/velTable/allDets etc. defined only when
    # estimateRedshift is True -- confirm callers never set testNeutralC without
    # estimateRedshift.
    if testNeutralC:
        table['nccFlag'] = Column(Int1d(tRows,0), None, "neutral carbon check flag")
        for r in range(slwDem2):
            for d in range(slwDem1):
                #get data from SLW and SSW matching ~same location on sky
                specIn = cubeSLW.getSpectrum1d(d,r) #slw spectrum
                ra, dec = specIn.meta['ra'].value, specIn.meta['dec'].value
                ind = continuumTable['row'].data.where((continuumTable['row'].data == d)\
                    .and(continuumTable['column'].data == r)\
                    .and(continuumTable['array'].data == 'SLW'))
                if ind.length() == 0: # No Continuum
                    continue
                polyPar = [continuumTable['p0'].data[ind][0],continuumTable['p1'].data[ind][0],
                    continuumTable['p2'].data[ind][0],continuumTable['p3'].data[ind][0]]
                i,j = cubeSSW.nearestPixels(ra,dec) #ssw pixel (for velocity value)
                #Clamp the SSW pixel indices to the grid
                if i < 0: i = 0
                if i >= sswDem1: i = sswDem1 - 1
                if j < 0: j = 0
                if j >= sswDem2: j = sswDem2 - 1
                vel = velTable[j].data[i]
                vErr = errTable[j].data[i]
                if vel == vel: #check nan
                    features, freqErr, snr = Double1d(), Double1d(), Double1d()
                    ind = allDets[slwInd].where((allRow[slwInd] == d).and(allColumn[slwInd] == r))
                    features.append(allFrequency[slwInd][ind])
                    freqErr.append(allFreqErr[slwInd][ind])
                    snr.append(allSNR[slwInd][ind])
                    pixInd = refInd[slwInd][ind]
                    removeFreq, newFreq, newErr, newSnr = checkNeutralC(specIn, features, snr,
                        Double1d(polyPar), vel, window=25.0, apod=False, fwhm=None)
                    newFreqLen = len(newFreq)
                    if newFreqLen > 0:
                        #Remove old CO/[CI] feature
                        if removeFreq is not None:
                            rInd = table['frequency'].data.where(\
                                (table['frequency'].data == removeFreq)\
                                .and(table['column'].data == r)\
                                .and(table['row'].data == d)).toInt1d()[0]
                            table.removeRow(rInd)
                            nLinesTable[r].data[d] -= 1
                            if testGofFfinal:
                                # NOTE(review): this reads row rInd AFTER removeRow(rInd),
                                # i.e. the row that shifted into that slot, not the removed
                                # feature's flag; also ff_orig is unbound when removeFreq is
                                # None but testGofFfinal is True -- confirm both are intended.
                                ff_orig = table['featureFlag'].data[rInd]
                        #Add new CO/[CI] feature
                        for i in range(newFreqLen):
                            if testGofFfinal:
                                if newFreq[i] < 600.0:
                                    ff = 0.1
                                else:
                                    ff = ff_orig
                                table.addRow([newFreq[i], newErr[i], newSnr[i],\
                                    'SLW', d, r, ra, dec, ff, vel, vErr, 'FF?', 1])
                            else:
                                table.addRow([newFreq[i], newErr[i], newSnr[i],\
                                    'SLW', d, r, ra, dec, vel, vErr, 'FF?', 1])
                            nLinesTable[r].data[d] += 1
                else: # Bad Velocity
                    continue
        #Refresh the SLW feature count after rows were added/removed
        table.meta['nFeaturesSlw'].value = Long(table['array'].data.where(table['array'].data == 'SLW').length())
    return table, [velTable, errTable, flagTable, nLinesTable, detTable]