Python astropy.io.fits.ColDefs() Examples
The following are 19 code examples of astropy.io.fits.ColDefs(), drawn from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out the other functions and classes available in the astropy.io.fits module.
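Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: build fits.Column objects, group them into a fits.ColDefs, create a binary table HDU with BinTableHDU.from_columns(), and write it to disk. The column names, values, and output file name below are illustrative only.

import numpy as np
from astropy.io import fits

# Define two columns: a 20-character string column and a single-precision float column
col1 = fits.Column(name='target', format='20A',
                   array=np.array(['NGC1001', 'NGC1002']))
col2 = fits.Column(name='V_mag', format='E',
                   array=np.array([11.1, 12.3]))

# Group the columns into a ColDefs and build a binary table HDU from them
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)

# Write the table to a (hypothetical) output file
tbhdu.writeto('example_table.fits', overwrite=True)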
Example #1
Source File: test_checksum.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_binary_table_data(self):
    a1 = np.array(['NGC1001', 'NGC1002', 'NGC1003'])
    a2 = np.array([11.1, 12.3, 15.2])
    col1 = fits.Column(name='target', format='20A', array=a1)
    col2 = fits.Column(name='V_mag', format='E', array=a2)
    cols = fits.ColDefs([col1, col2])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)

    with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
        assert comparerecords(tbhdu.data, hdul[1].data)
        assert 'CHECKSUM' in hdul[0].header
        assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
        assert 'DATASUM' in hdul[0].header
        assert hdul[0].header['DATASUM'] == '0'
        assert 'CHECKSUM' in hdul[1].header
        assert hdul[1].header['CHECKSUM'] == 'aD1Oa90MaC0Ma90M'
        assert 'DATASUM' in hdul[1].header
        assert hdul[1].header['DATASUM'] == '1062205743'
Example #2
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_bin_table_with_logical_array(self):
    c1 = fits.Column(name='flag', format='2L',
                     array=[[True, False], [False, True]])
    coldefs = fits.ColDefs([c1])
    tbhdu1 = fits.BinTableHDU.from_columns(coldefs)

    assert (tbhdu1.data.field('flag')[0] ==
            np.array([True, False], dtype=bool)).all()
    assert (tbhdu1.data.field('flag')[1] ==
            np.array([False, True], dtype=bool)).all()

    tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)

    assert (tbhdu.data.field('flag')[0] ==
            np.array([True, False], dtype=bool)).all()
    assert (tbhdu.data.field('flag')[1] ==
            np.array([False, True], dtype=bool)).all()
Example #3
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_column_endianness(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
    (Astropy doesn't preserve byte order of non-native order column arrays)
    """
    a = [1., 2., 3., 4.]
    a1 = np.array(a, dtype='<f8')
    a2 = np.array(a, dtype='>f8')

    col1 = fits.Column(name='a', format='D', array=a1)
    col2 = fits.Column(name='b', format='D', array=a2)
    cols = fits.ColDefs([col1, col2])
    tbhdu = fits.BinTableHDU.from_columns(cols)

    assert (tbhdu.data['a'] == a1).all()
    assert (tbhdu.data['b'] == a2).all()

    # Double check that the array is converted to the correct byte-order
    # for FITS (big-endian).
    tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
    with fits.open(self.temp('testendian.fits')) as hdul:
        assert (hdul[1].data['a'] == a2).all()
        assert (hdul[1].data['b'] == a2).all()
Example #4
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_fits_record_len(self):
    counts = np.array([312, 334, 308, 317])
    names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
    c1 = fits.Column(name='target', format='10A', array=names)
    c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
    c3 = fits.Column(name='notes', format='A10')
    c4 = fits.Column(name='spectrum', format='5E')
    c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
    coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
    tbhdu = fits.BinTableHDU.from_columns(coldefs)
    tbhdu.writeto(self.temp('table1.fits'))

    t1 = fits.open(self.temp('table1.fits'))

    assert len(t1[1].data[0]) == 5
    assert len(t1[1].data[0][0:4]) == 4
    assert len(t1[1].data[0][0:5]) == 5
    assert len(t1[1].data[0][0:6]) == 5
    assert len(t1[1].data[0][0:7]) == 5
    assert len(t1[1].data[0][1:4]) == 3
    assert len(t1[1].data[0][1:5]) == 4
    assert len(t1[1].data[0][1:6]) == 4
    assert len(t1[1].data[0][1:7]) == 4

    t1.close()
Example #5
Source File: processing_utils.py From drizzlepac with BSD 3-Clause "New" or "Revised" License
def update_hdrtab(image, level, total_obj_list, input_exposures):
    """Build HAP entry table extension for product"""
    # Convert input_exposure filenames into HAP product filenames
    name_col = []
    orig_tab = image['hdrtab'].data

    for row in orig_tab:
        rootname = str(row['rootname'])

        # The rootname is ipppssoot, but the expname only contains ipppssoo,
        # so remove the last character for the comparisons
        rootname = rootname[0:-1]

        for expname in input_exposures:
            if rootname in expname:
                if level == 1:
                    # Interpret inputs as exposure (FLT/FLC) filenames, not HAP names
                    name_col.append(expname)
                else:
                    # Convert input exposure names into HAP names
                    foundit = False
                    for tot_obj in total_obj_list:
                        for exposure in tot_obj.edp_list:
                            if rootname in exposure.full_filename:
                                name_col.append(exposure.drizzle_filename)
                                foundit = True
                                break

    # define new column with HAP expname
    max_len = min(max([len(name) for name in name_col]), 51)
    hapcol = Column(array=np.array(name_col, dtype=np.str), name=HAPCOLNAME,
                    format='{}A'.format(max_len + 4))
    newcol = fits.ColDefs([hapcol])

    # define new extension
    haphdu = fits.BinTableHDU.from_columns(orig_tab.columns + newcol)
    haphdu.header['extname'] = 'HDRTAB'
    haphdu.header['extver'] = 1
    # remove old extension
    del image['hdrtab']
    # replace with new extension
    image.append(haphdu)
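The key ColDefs trick in this function is fits.BinTableHDU.from_columns(orig_tab.columns + newcol): the ColDefs of an existing table can be concatenated with a new ColDefs using +, and a fresh HDU built from the result. Below is a stripped-down sketch of that pattern under made-up column names and values, without the drizzlepac-specific objects.

import numpy as np
from astropy.io import fits

# An existing binary table with one column
orig = fits.BinTableHDU.from_columns(
    fits.ColDefs([fits.Column(name='rootname', format='8A',
                              array=np.array(['ipppssoo', 'jpppssoo']))]))

# A new column wrapped in its own ColDefs
newcol = fits.ColDefs([fits.Column(name='expname', format='20A',
                                   array=np.array(['file_a_flt.fits',
                                                   'file_b_flt.fits']))])

# Concatenate the existing column definitions with the new one and rebuild the HDU
extended = fits.BinTableHDU.from_columns(orig.data.columns + newcol)
print(extended.columns.names)   # ['rootname', 'expname']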
Example #6
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_column_lookup_by_name(self):
    """Tests that a `ColDefs` can be indexed by column name."""
    a = fits.Column(name='a', format='D')
    b = fits.Column(name='b', format='D')

    cols = fits.ColDefs([a, b])

    assert cols['a'] == cols[0]
    assert cols['b'] == cols[1]
Example #7
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_coldefs_init_from_array(self):
    """Test that ColDefs._init_from_array works with single element
    data-types as well as multi-element data-types
    """
    nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
    col_defs = fits.column.ColDefs(nd_array)
    assert 2**31 == col_defs['A'].bzero
    assert 2**15 == col_defs['B'].bzero
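As the test above suggests, a ColDefs does not have to be built from a list of Column objects; it can also be initialized directly from a NumPy structured array, with field names and dtypes mapped to column names and formats (unsigned integer fields are represented via BZERO scaling). A short sketch of that usage, with made-up field names and values:

import numpy as np
from astropy.io import fits

# A structured array with an unsigned 32-bit field and a big-endian unsigned 16-bit field
data = np.zeros(3, dtype=[('A', '<u4'), ('B', '>u2')])

# ColDefs infers column names, formats, and scaling (BZERO) from the dtype
col_defs = fits.ColDefs(data)
print(col_defs.names)        # ['A', 'B']
print(col_defs['A'].bzero)   # 2147483648 (2**31), since FITS stores unsigned ints as scaled signed ints

# The same structured array can also be passed straight to from_columns()
hdu = fits.BinTableHDU.from_columns(data)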
Example #8
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_new_coldefs_with_invalid_seqence(self):
    """Test that a TypeError is raised when a ColDefs is instantiated with
    a sequence of non-Column objects.
    """
    pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
Example #9
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_bool_column_update(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
    c1 = fits.Column('F1', 'L', array=[True, False])
    c2 = fits.Column('F2', 'L', array=[False, True])
    thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
    thdu.writeto(self.temp('table.fits'))

    with fits.open(self.temp('table.fits'), mode='update') as hdul:
        hdul[1].data['F1'][1] = True
        hdul[1].data['F2'][0] = True

    with fits.open(self.temp('table.fits')) as hdul:
        assert (hdul[1].data['F1'] == [True, True]).all()
        assert (hdul[1].data['F2'] == [True, True]).all()
Example #10
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_mismatched_tform_and_tdim(self):
    """Normally the product of the dimensions listed in a TDIMn keyword
    must be less than or equal to the repeat count in the TFORMn keyword.

    This tests that this works if less than (treating the trailing bytes
    as unspecified fill values per the FITS standard) and fails if the
    dimensions specified by TDIMn are greater than the repeat count.
    """
    arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
    arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])

    cols = [fits.Column(name='a', format='20I', dim='(2,2)', array=arra),
            fits.Column(name='b', format='4I', dim='(2,2)', array=arrb)]

    # The first column has the mismatched repeat count
    hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
    hdu.writeto(self.temp('test.fits'))

    with fits.open(self.temp('test.fits')) as h:
        assert h[1].header['TFORM1'] == '20I'
        assert h[1].header['TFORM2'] == '4I'
        assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
        assert (h[1].data['a'] == arra).all()
        assert (h[1].data['b'] == arrb).all()
        assert h[1].data.itemsize == 48  # 16-bits times 24

    # If dims is more than the repeat count in the format specifier raise
    # an error
    pytest.raises(VerifyError, fits.Column, name='a', format='2I',
                  dim='(2,2)', array=arra)
Example #11
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_new_table_with_nd_column(self):
    """Regression test for
    https://github.com/spacetelescope/PyFITS/issues/3
    """
    arra = np.array(['a', 'b'], dtype='|S1')
    arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
    arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])

    cols = [
        fits.Column(name='str', format='1A', array=arra),
        fits.Column(name='strarray', format='4A', dim='(2,2)', array=arrb),
        fits.Column(name='intarray', format='4I', dim='(2, 2)', array=arrc)
    ]

    hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
    hdu.writeto(self.temp('test.fits'))

    with fits.open(self.temp('test.fits')) as h:
        # Need to force string arrays to byte arrays in order to compare
        # correctly on Python 3
        assert (h[1].data['str'].encode('ascii') == arra).all()
        assert (h[1].data['strarray'].encode('ascii') == arrb).all()
        assert (h[1].data['intarray'] == arrc).all()
Example #12
Source File: test_table.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_endianness(self):
    x = np.ndarray((1,), dtype=object)
    channelsIn = np.array([3], dtype='uint8')
    x[0] = channelsIn
    col = fits.Column(name="Channels", format="PB()", array=x)
    cols = fits.ColDefs([col])
    tbhdu = fits.BinTableHDU.from_columns(cols)
    tbhdu.name = "RFI"
    tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
    hduL = fits.open(self.temp('testendian.fits'))
    rfiHDU = hduL['RFI']
    data = rfiHDU.data
    channelsOut = data.field('Channels')[0]
    assert (channelsIn == channelsOut).all()
    hduL.close()
Example #13
Source File: test_checksum.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_ascii_table_data(self):
    a1 = np.array(['abc', 'def'])
    r1 = np.array([11.0, 12.0])

    c1 = fits.Column(name='abc', format='A3', array=a1)
    # This column used to be E format, but the single-precision float lost
    # too much precision when scaling so it was changed to a D
    c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
                     bzero=0.6)
    c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
    x = fits.ColDefs([c1, c2, c3])
    hdu = fits.TableHDU.from_columns(x)
    hdu.writeto(self.temp('tmp.fits'), overwrite=True, checksum=True)

    with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
        assert comparerecords(hdu.data, hdul[1].data)
        assert 'CHECKSUM' in hdul[0].header
        assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
        assert 'DATASUM' in hdul[0].header
        assert hdul[0].header['DATASUM'] == '0'

        if not sys.platform.startswith('win32'):
            # The checksum ends up being different on Windows, possibly due
            # to slight floating point differences
            assert 'CHECKSUM' in hdul[1].header
            assert hdul[1].header['CHECKSUM'] == '3rKFAoI94oICAoI9'
            assert 'DATASUM' in hdul[1].header
            assert hdul[1].header['DATASUM'] == '1914653725'
Example #14
Source File: test_core.py From Carnets with BSD 3-Clause "New" or "Revised" License
def test_add_del_columns(self):
    p = fits.ColDefs([])
    p.add_col(fits.Column(name='FOO', format='3J'))
    p.add_col(fits.Column(name='BAR', format='1I'))
    assert p.names == ['FOO', 'BAR']
    p.del_col('FOO')
    assert p.names == ['BAR']
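The add_col and del_col calls above modify the ColDefs in place; to get a table reflecting the change, a new HDU is built from the updated definitions. A minimal sketch of that workflow, with illustrative column names (the array contents are made up):

import numpy as np
from astropy.io import fits

# Start from an existing set of column definitions
coldefs = fits.ColDefs([fits.Column(name='FOO', format='3J'),
                        fits.Column(name='BAR', format='1I')])

# Drop one column and add another; both calls modify the ColDefs in place
coldefs.del_col('FOO')
coldefs.add_col(fits.Column(name='BAZ', format='E',
                            array=np.array([1.0, 2.0])))
print(coldefs.names)   # ['BAR', 'BAZ']

# Build a fresh table HDU from the updated definitions
tbhdu = fits.BinTableHDU.from_columns(coldefs)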
Example #15
Source File: mosaic.py From k2mosaic with MIT License
def _make_cr_extension(self):
    """Create the cosmic ray extension (i.e. extension #3)."""
    cols = []
    cols.append(fits.Column(name='RAWX', format='I', disp='I4',
                            array=np.array([])))
    cols.append(fits.Column(name='RAWY', format='I', disp='I4',
                            array=np.array([])))
    cols.append(fits.Column(name='COSMIC_RAY', format='E', disp='E14.7',
                            array=np.array([])))
    coldefs = fits.ColDefs(cols)
    hdu = fits.BinTableHDU.from_columns(coldefs)
    return hdu
Example #16
Source File: basecase.py From pysynphot with BSD 3-Clause "New" or "Revised" License
def savepysyn(self, wave, flux, fname, units=None):
    """Cannot ever use the .writefits() method, because the array is
    frequently just sampled at the synphot waveset; plus, writefits
    is smart and does things like tapering."""
    if units is None:
        ytype = 'throughput'
        units = ' '
    else:
        ytype = 'flux'
    col1 = pyfits.Column(name='wavelength', format='D', array=wave)
    col2 = pyfits.Column(name=ytype, format='D', array=flux)
    tbhdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([col1, col2]))
    tbhdu.header.update('tunit1', 'angstrom')
    tbhdu.header.update('tunit2', units)
    tbhdu.writeto(fname.replace('.fits', '_pysyn.fits'))
Example #17
Source File: fits.py From everest with MIT License
def ImagesHDU(model):
    '''
    Construct the HDU containing sample postage stamp images of the target.

    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=4)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # The images
    format = '%dD' % model.pixel_images[0].shape[1]
    arrays = [pyfits.Column(name='STAMP1', format=format,
                            array=model.pixel_images[0]),
              pyfits.Column(name='STAMP2', format=format,
                            array=model.pixel_images[1]),
              pyfits.Column(name='STAMP3', format=format,
                            array=model.pixel_images[2])]

    # Create the HDU
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(
        cols, header=header, name='POSTAGE STAMPS')

    return hdu
Example #18
Source File: fits.py From everest with MIT License
def PixelsHDU(model):
    '''
    Construct the HDU containing the pixel-level light curve.

    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=2)

    # Add EVEREST info
    cards = []
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # Create the HDU
    header = pyfits.Header(cards=cards)

    # The pixel timeseries
    arrays = [pyfits.Column(name='FPIX', format='%dD' % model.fpix.shape[1],
                            array=model.fpix)]

    # The first order PLD vectors for all the neighbors (npixels, ncadences)
    X1N = model.X1N
    if X1N is not None:
        arrays.append(pyfits.Column(name='X1N', format='%dD' % X1N.shape[1],
                                    array=X1N))

    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS')

    return hdu
Example #19
Source File: catalogs.py From Aegean with Academic Free License v3.0
def writeFITSTable(filename, table):
    """
    Convert a table into a FITSTable and then write to disk.

    Parameters
    ----------
    filename : str
        Filename to write.

    table : Table
        Table to write.

    Returns
    -------
    None

    Notes
    -----
    Due to a bug in numpy, `int32` and `float32` are converted to `int64`
    and `float64` before writing.
    """
    def FITSTableType(val):
        """
        Return the FITSTable type corresponding to each named parameter in obj
        """
        if isinstance(val, bool):
            types = "L"
        elif isinstance(val, (int, np.int64, np.int32)):
            types = "J"
        elif isinstance(val, (float, np.float64, np.float32)):
            types = "E"
        elif isinstance(val, six.string_types):
            types = "{0}A".format(len(val))
        else:
            log.warning("Column {0} is of unknown type {1}".format(val, type(val)))
            log.warning("Using 5A")
            types = "5A"
        return types

    cols = []
    for name in table.colnames:
        # Cause error columns to always be floats even when they are set to -1
        if name.startswith('err_'):
            fmt = 'E'
        elif name == 'uuid':
            fmt = '{0}A'.format(max(len(val) for val in table[name]))
        else:
            fmt = FITSTableType(table[name][0])
        cols.append(fits.Column(name=name, format=fmt, array=table[name]))

    cols = fits.ColDefs(cols)
    tbhdu = fits.BinTableHDU.from_columns(cols)
    for k in table.meta:
        tbhdu.header['HISTORY'] = ':'.join((k, table.meta[k]))
    tbhdu.writeto(filename, overwrite=True)
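For context, a hypothetical call to the function above might look like the following. The table contents, column names, and output path are made up, and the call assumes the module-level imports used by Aegean's catalogs.py (numpy, six, astropy.io.fits, and its log object) are in place.

import numpy as np
from astropy.table import Table

# Build a small catalogue table; 'uuid' and 'err_*' columns receive the special handling above
catalog = Table({'source': ['src_a', 'src_b'],
                 'flux': np.array([1.2, 3.4], dtype=np.float32),
                 'err_flux': np.array([0.1, -1.0], dtype=np.float32),
                 'uuid': ['a1b2', 'c3d4e5']})
catalog.meta['PROGRAM'] = 'Aegean'   # recorded as a HISTORY card

# Write it out as a binary FITS table (hypothetical output path)
writeFITSTable('catalog.fits', catalog)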