import os

from .bbts_generator import BBTSErddapConfigGenerator
from .simple_generator import SimpleErrdapConfigGenerator

CURRENT_PATH = os.path.abspath(__file__)


def generate_erddap_config(**kwargs):
    """
    Build an ERDDAP dataset-config XML for a platform.

    :keyword platform_type: platform kind ('bbts', 'slocum', etc.); only 'bbts' has a full workflow
    :keyword output_path: location to save the config XMLs to
    :keyword data_file_path: location of data files on the ERDDAP server (belafonte.ocean.dal.ca)
    :keyword sample_file: full path to a sample data file (used to list variables)
    :keyword sensors: list of either variable names (as strings) or dictionaries of sensor info
        from sensor_tracker_client.sensor.get({'depth': 1})

    .. note:: All other keywords are passed to the relevant ConfigGenerator class
    .. seealso:: BBTSErddapConfigGenerator, SimpleErrdapConfigGenerator
    """
    platform_type = kwargs.pop('platform_type', None)
    output_path = kwargs.pop('output_path', None)
    sensors = kwargs.pop('sensors', [])
    # NOTE(review): CURRENT_PATH is this __init__.py, not a data file — these
    # defaults look like placeholders; confirm callers always pass both paths.
    data_file_path = kwargs.pop('data_file_path', CURRENT_PATH)
    sample_file = kwargs.pop('sample_file', CURRENT_PATH)

    if platform_type == 'bbts':
        generator = BBTSErddapConfigGenerator(**kwargs)
        generator.dataset_setting(data_file_path)
        # Key each Sensor Tracker record (a nested dict) by "<instrument>.<sensor>".
        # The '$GPRMC' -> 'GPRMC' rename deliberately mutates the caller's dicts:
        # downstream code matches on the bare 'GPRMC' identifier.
        sensor_dict = {}
        for sensor in sensors:
            instrument = sensor['instrument']
            if instrument['identifier'] == '$GPRMC':
                instrument['identifier'] = 'GPRMC'
            full_name = '{}.{}'.format(instrument['identifier'], sensor['identifier'])
            sensor_dict[full_name] = sensor
        variables = generator.merge_ST_variables(sensor_dict, sample_file)
        generator.generate_variables(variables, False)
        generator.generate_dataset_meta()
        generator.get_erddap_config(output_path)
    else:
        generator = SimpleErrdapConfigGenerator()
        # TODO: Setup workflow for this use case
+ """ + TEMPLATE_NAME = "bbts.xml" + EXTRA_METADATA = os.path.join(os.path.dirname(CURRENT_PATH), "templates/bbts.json") + RELOAD_EVERY_N_MINUTES = 2880 + UPDATE_EVERY_N_MILLIS = 10000 + string_sensors = ['INSTRUMENT', 'INSTFIELD', 'VLF_INSTRUMENT', 'TIMEFIELD', 'DATEFIELD'] + GPRMC_Strings = ['EWHEM', 'NAV', 'NSHEM', 'UNKNOWN'] + + def __init__(self, **kwargs): + super().__init__() + + self.dataset_id = kwargs.pop("dataset_id", None) + dataset_params = {} + # create dataset_id if it is not provided + if self.dataset_id is None: + prefix = kwargs.pop('prefix', "bb1") + if kwargs.pop('remove_underscore', True) and prefix[-1] == '_': + dataset_params['file_prefix'] = prefix[:-1] + else: + dataset_params['file_prefix'] = prefix + for key in ['platform_name', 'proc_level']: + try: + dataset_params[key] = kwargs.pop(key) + except KeyError: + raise KeyError('Parameter {} not found in kwargs. Must specify either dataset_id OR platform_name ' + 'and proc_level.'.format(key)) + self.dataset_id = self.dataset_id_generate(**dataset_params) + else: + parts = self.dataset_id.split('_') + if len(parts) < 3: + raise ValueError('Provided dataset ID is not valid for a BBTS-type dataset: {}'.format(self.dataset_id)) + dataset_params['file_prefix'] = parts[0] + '_' + parts[1] + dataset_params['platform_name'] = parts[1] + dataset_params['proc_level'] = '_'.join(parts[2:]) + if dataset_params['proc_level'] == 'level1': + dataset_params['remove_mv_rows'] = 'false' + else: + dataset_params['remove_mv_rows'] = 'true' + self.dataset_params = dataset_params + self.keywords = kwargs.pop('base_keywords', set()) + + def get_erddap_config(self, output_path=None): + """ + Create ERDDAP config XML + + :param output_path: path to output XML file + :return: + """ + input_variables = {} + input_variables.update(self.dataset_params) + input_variables.update(self.dataset_setting_variable) + input_variables.update(self.dataset_meta) + input_variables["deployment_variables"] = self.dataset_variables + 
config_str = (self.env.render( + **input_variables + )) + if output_path: + with open(output_path, 'w') as f: + f.write(config_str) + return output_path + + def dataset_id_generate(self, platform_name, proc_level, file_prefix="bb1"): + """ + Generate BBTS-style dataset ID + + :param platform_name: either 'bop' or 'hyp' + :param proc_level: data processing level, either 'level1', 'level2', or 'level2_bin' + :param file_prefix: prefix for dataset ID, default 'bb1' + :return: + """ + dataset_id = file_prefix + '_' + platform_name + '_' + proc_level + return dataset_id + + @staticmethod + def merge_ST_variables(sensor_tracker_variables, data_file_path): + """ + Filter variable list to only those present in both the data file and a list from Sensor Tracker + + :param sensor_tracker_variables: nested dictionary of information from Sensor Tracker (top-level keys are variable names) + :param data_file_path: location of a NetCDF file for the dataset + :return: + """ + new_variable_list = [] + nc_variables = NCVariableReader(data_file_path).get_variables() + for name, unit in nc_variables: + if name == 'sampling_day': + new_variable_list.append({ + 'name': name, + 'dataType': 'String' + }) + elif name in sensor_tracker_variables: + var = { + 'name': name, + 'instrument': sensor_tracker_variables[name]['instrument']['identifier'], + 'identifier': sensor_tracker_variables[name]['identifier'], + 'sensor_id': sensor_tracker_variables[name]['instrument']['identifier'] + '\\.' + sensor_tracker_variables[name]['identifier'] + } + if sensor_tracker_variables[name]['standard_name'] is not None and sensor_tracker_variables[name]['standard_name'] != '': + # Some are empty strings? 
@staticmethod
def merge_ST_variables(sensor_tracker_variables, data_file_path):
    """
    Keep only variables present in both the data file and the Sensor Tracker list.

    :param sensor_tracker_variables: nested dictionary of information from Sensor Tracker
        (top-level keys are variable names)
    :param data_file_path: location of a NetCDF file for the dataset
    :return: list of dicts describing the matched variables
    """
    merged = []
    for name, _unit in NCVariableReader(data_file_path).get_variables():
        if name == 'sampling_day':
            # special column extracted from the file name; always a string
            merged.append({'name': name, 'dataType': 'String'})
            continue
        if name not in sensor_tracker_variables:
            continue
        record = sensor_tracker_variables[name]
        entry = {
            'name': name,
            'instrument': record['instrument']['identifier'],
            'identifier': record['identifier'],
            # escaped dot: this string is used verbatim inside a regex in the XML template
            'sensor_id': record['instrument']['identifier'] + '\\.' + record['identifier'],
        }
        standard_name = record['standard_name']
        # Sensor Tracker sometimes returns empty strings; treat those as unset
        if standard_name:
            entry['standard_name'] = standard_name
        merged.append(entry)
    return merged


def dataset_setting(self, data_file_dir):
    """
    Record the basic per-dataset settings consumed by the XML template.

    :param data_file_dir: location of data files on the ERDDAP server
    """
    self.dataset_setting_variable = {
        "datasetID": self.dataset_id,
        "reloadEveryNMinutes": self.RELOAD_EVERY_N_MINUTES,
        "updateEveryNMillis": self.UPDATE_EVERY_N_MILLIS,
        "fileDir": data_file_dir,
    }


def generate_dataset_meta(self):
    """
    Build the dataset title/summary/keyword metadata.

    Must run AFTER generate_variables() so the keyword set is complete.
    Accepts platform names 'bop' and 'hyp' and levels shaped like
    'level#[_suffix]'; a suffix means the data is binned.
    """
    level_parts = self.dataset_params['proc_level'].split('_')
    level_number = level_parts[0][5:]  # digits after the literal 'level'
    binned = ' Binned' if len(level_parts) > 1 else ''
    platform = self.dataset_params['platform_name']
    title = 'Bedford Basin {0} Level {1}{2}'.format(platform.upper(), level_number, binned)
    if platform.lower() == 'hyp':
        summary = 'Level {0}{1} data from the Bedford Basin collected from the HyperPro'.format(
            level_number, binned.lower())
    elif platform.lower() == 'bop':
        summary = 'Level {0}{1} data of the BOP profiler in the Bedford Basin'.format(
            level_number, binned.lower())
    else:
        raise ValueError('Unrecognized platform name: {}'.format(platform))
    self.dataset_meta = {
        "title": title,
        "summary": summary,
        "keywords": ', '.join(sorted(self.keywords, key=str.lower)),
        # NOTE(review): placeholder carried over from the original — fill in
        # real info URLs before deployment.
        "infoUrl": ' , , '
    }
def generate_variables(self, variables, return_value=True):
    """
    Build the per-variable metadata used by the ERDDAP XML template.

    Extra metadata (IOOS categories, colorbar settings, etc.) is read from the
    JSON file at self.EXTRA_METADATA. The result is stored on
    self.dataset_variables and every variable contributes to self.keywords.

    :param variables: list of variable dicts, as produced by merge_ST_variables()
    :param return_value: boolean, whether to also return the resulting variable list
    :return: the variable list if return_value is True, else None

    .. warning:: This cannot currently be run with the variable list produced by
       BaseGenerator.get_variables(), only the output from self.merge_ST_variables()
    """
    with open(self.EXTRA_METADATA) as f:
        extra_meta = json.load(f)
    var_list = []
    for x in variables:
        # 'standard' marks variables whose long_name the template prefixes
        # with the instrument identifier.
        standard = True
        destination_name = '_'.join(x['name'].split('.'))
        # 'sampling_day' and 'time' are unique special cases
        if x['name'] == 'sampling_day':
            extra_attributes = {
                'long_name': 'Sampling Day',
                'ioos_category': 'Identifier',
                'cf_role': 'timeseries_id'
            }
            data_type = 'String'
            standard = False
        elif x['name'] == 'time':
            extra_attributes = {
                'ioos_category': 'Time',
                '_ChunkSizes': 'null'
            }
            data_type = 'double'
            standard = False
        else:
            # Defaults that work for most variables; refined below.
            extra_attributes = {
                'long_name': space_and_capitalize(x['name']).split('.')[1],
                'ioos_category': 'Unknown',
                '_ChunkSizes': 'null'
            }
            if 'standard_name' in x:
                # variables with a standard name specified in Sensor Tracker
                extra_attributes['standard_name'] = x['standard_name']
                extra_attributes['long_name'] = space_and_capitalize(x['standard_name'])
                standard = False
            elif x['instrument'] == 'GPRMC':
                # master latitude and longitude variables (recorded by GPRMC)
                if x['identifier'] in ['LAT', 'latitude']:
                    destination_name = 'Latitude'
                    extra_attributes['long_name'] = 'Latitude'
                    standard = False
                elif x['identifier'] in ['LON', 'longitude']:
                    destination_name = 'Longitude'
                    extra_attributes['long_name'] = 'Longitude'
                    standard = False
            # IOOS categories specified in the supplemental metadata JSON
            for cat in extra_meta['ioos_category']:
                if cat == 'Location':
                    # 'Location' matches on the bare identifier: latitude,
                    # longitude and i_depth are present on all instruments at level 2+
                    if x['identifier'] in extra_meta['ioos_category'][cat]:
                        extra_attributes['ioos_category'] = cat
                        break
                else:
                    # other categories list the full '<instrument>.<sensor>' name
                    if x['name'] in extra_meta['ioos_category'][cat]:
                        extra_attributes['ioos_category'] = cat
                        break
            # Per-variable overrides (colorbar ranges, units, ...)
            if x['name'] in extra_meta['variables']:
                extra_attributes.update(extra_meta['variables'][x['name']])
            # Variable data type (most are 'double', but some specific ones are 'String')
            string_variable = x['name'].endswith("_wavelengths") or x['name'].endswith("_wavelengths_data")
            string_variable = string_variable or x['identifier'] in self.string_sensors
            string_variable = string_variable or (x['instrument'] == 'GPRMC' and x['identifier'] in self.GPRMC_Strings)
            data_type = 'String' if string_variable else 'double'
        temp = {
            "destinationName": destination_name,
            "dataType": data_type,
            "addAttributes": extra_attributes
        }
        if standard:
            temp['standard'] = True
        if x['name'] == 'sampling_day' or x['name'] == 'time':
            temp['sourceName'] = x['name']
        else:
            temp['instrument'] = x['instrument']
            temp['sensor'] = x['identifier']
        var_list.append(temp)
        # Grow the dataset keyword set. Use .get(): 'sampling_day' entries from
        # merge_ST_variables() have no 'identifier' key, so x['identifier']
        # would raise KeyError here.
        self.keywords.add(x['name'])
        if x.get('identifier'):
            self.keywords.update(x['identifier'].split('_'))
        if 'standard_name' in extra_attributes:
            self.keywords.update(extra_attributes['standard_name'].split('_'))
    self.dataset_variables = var_list
    if return_value:
        return var_list
class NCVariableReader:
    """
    Class for reading variable information from a NetCDF file.

    :param netcdf_file_path: path to a NetCDF file
    """
    def __init__(self, netcdf_file_path):
        self.dataset_path = netcdf_file_path
        self._dataset = None        # lazily opened netCDF4.Dataset
        self._variable_list = []    # cached (name, units-or-None) pairs

    @property
    def dataset(self):
        # Open the file on first access only.
        if self._dataset is None:
            self._dataset = netCDF4.Dataset(self.dataset_path)
        return self._dataset

    def get_variables(self):
        """
        Return a list of (variable_name, units) tuples; units is None when the
        variable has no 'units' attribute.
        """
        # Rebuild the list each call: previously repeated calls appended
        # duplicate entries to the cached list.
        self._variable_list = []
        for key in self.dataset.variables.keys():
            var = self.dataset[key]
            if hasattr(var, "units"):
                self._variable_list.append((key, var.units))
            else:
                self._variable_list.append((key, None))
        return self._variable_list


def space_and_capitalize(text, sep='_'):
    """
    Replace `sep` in string `text` with a space, and capitalize the first
    letter after each separator.

    Empty segments (produced by leading, trailing or doubled separators, or by
    an empty `text`) are preserved as empty strings; previously they raised
    IndexError on `segment[0]`.
    """
    fixed = []
    for segment in text.split(sep):
        fixed.append(segment[0].upper() + segment[1:] if segment else segment)
    return ' '.join(fixed)
+ """ + TEMPLATE_NAME = "simple_slocum.xml" + + def __init__(self): + self._env = None + self._jinja_env = None + self._template_path = None + self.dataset_setting_variable = None + self.dataset_variables = None + self.dataset_meta = None + + @staticmethod + def get_variables(sensor_tracker_variable_list, data_file_path): + """ + Return variables from data file that are present in `sensor_tracker_variable_list` + + :param sensor_tracker_variable_list: list of variable names (as strings) + :param data_file_path: full path to a NetCDF file for the dataset + """ + new_variable_list = [] + nc_variables = NCVariableReader(data_file_path).get_variables() + for var, unit in nc_variables: + if var in sensor_tracker_variable_list: + new_variable_list.append((var, unit)) + return new_variable_list + + @property + def env(self): + if self._jinja_env is None: + self._jinja_env = Environment( + loader=FileSystemLoader(self.template_path), + autoescape=select_autoescape(['html', 'xml']), + trim_blocks=True, + lstrip_blocks=True + ) + if self._env is None: + self._env = self._jinja_env.get_template(self.TEMPLATE_NAME) + return self._env + + @property + def template_path(self): + if self._template_path is None: + self._template_path = os.path.join(os.path.dirname(CURRENT_PATH), 'templates') + return self._template_path + + def generate_datasets(path, platform_name, start_time, mission_id, remote_dir, **kwargs): """ :param path: the path of the sample nc file diff --git a/ERDDAP_config_tools/simple_generator.py b/ERDDAP_config_tools/simple_generator.py index a357b7a46aa34d03ae92cbca1c031a7339d4e2aa..236e5889605edbd0dbbd8e5de5fb4674e4ccd1ab 100644 --- a/ERDDAP_config_tools/simple_generator.py +++ b/ERDDAP_config_tools/simple_generator.py @@ -1,33 +1,11 @@ import os -import netCDF4 from jinja2 import Environment, select_autoescape, FileSystemLoader from app_common.utilities import IDGenerator -CURRENT_PATH = os.path.abspath(__file__) - +from .generator import NCVariableReader -class 
NCVariableReader: - def __init__(self, netcdf_file_path): - self.dataset_path = netcdf_file_path - self._dataset = None - self._variable_list = [] - - @property - def dataset(self): - if self._dataset is None: - self._dataset = netCDF4.Dataset(self.dataset_path) - return self._dataset - - def get_variables(self): - - for key in self.dataset.variables.keys(): - var = self.dataset[key] - if hasattr(var, "units"): - self._variable_list.append((key, var.units)) - else: - self._variable_list.append((key, None)) - return self._variable_list +CURRENT_PATH = os.path.abspath(__file__) class SimpleErrdapConfigGenerator: diff --git a/ERDDAP_config_tools/templates/bbts.json b/ERDDAP_config_tools/templates/bbts.json new file mode 100644 index 0000000000000000000000000000000000000000..f73f68346a0a12e99b04f93f6ba5d183244a2c69 --- /dev/null +++ b/ERDDAP_config_tools/templates/bbts.json @@ -0,0 +1,132 @@ +{ + "ioos_category": { + "Currents": [ + "GPRMC.platform_speed_wrt_ground", + "GPRMC.Speed" + ], + "Dissolved O2": [ + "SATCTD7229.mole_concentration_of_dissolved_molecular_oxygen_in_sea_water", + "SATCTD7229.O2", + "SATCTD7229.O2_volts", + "SATCTD7229.percent_saturation_of_oxygen_in_sea_water" + ], + "Location": [ + "LAT", + "latitude", + "LON", + "longitude", + "i_depth", + "weighted_spectral_depth", + "b_depth" + ], + "Meteorology": [ + "SATVSF0113.REVISION" + ], + "Ocean Color": [ + "SATFL30338.fluorescence_chlorophyll_blue_to_red", + "SATFL30338.fluorescence_chlorophyll_red_to_far_red" + ], + "Optical Properties": [ + "SATBF20129.FLUOR", + "SATFL30338.fluorescence_cdom_uv_to_blue" + ], + "Pressure": [ + "SATACS0020.ACS_PRESSURE" + ], + "Salinity": [ + "SATCTD7229.SALINITY", + "SATCTD7229.sea_water_absolute_salinity", + "SATCTD7229.sea_water_density", + "SATCTD7229.sea_water_electrical_conductivity", + "SATCTD7229.sea_water_ph_reported_on_total_scale", + "SATCTD7229.sea_water_practical_salinity", + "SATCTD7229.sea_water_salinity", + "SATMPR0078.sea_water_absolute_salinity", + 
"SATMPR0078.sea_water_density", + "SATMPR0078.sea_water_electrical_conductivity", + "SATMPR0078.sea_water_practical_salinity" + ], + "Statistics": [ + "SATVSF0113.FRAME_COUNT" + ], + "Time": [ + "time", + "SATACS0020.ACS_TIME", + "SATHPE0210.integration_time", + "SATHPL0193.integration_time", + "SATHSE0211.integration_time" + ], + "Temperature": [ + "SATCTD7229.sea_water_conservative_temperature", + "SATCTD7229.sea_water_temperature", + "SATMPR0078.sea_water_conservative_temperature", + "SATMPR0078.sea_water_temperature" + ] + }, + "variables": { + "SATCTD7229.percent_saturation_of_oxygen_in_sea_water": { + "colorBarMinimum": 0, + "colorBarMaximum": 100 + }, + "SATVSF0113.FRAME_COUNT": { + "colorBarMinimum": 0, + "colorBarMaximum": 100 + }, + "SATFL30338.fluorescence_chlorophyll_blue_to_red": { + "colorBarMinimum": 0.03, + "colorBarMaximum": 30, + "colorBarScale": "Log" + }, + "SATFL30338.fluorescence_chlorophyll_red_to_far_red": { + "colorBarMinimum": 0.03, + "colorBarMaximum": 30, + "colorBarScale": "Log" + }, + "SATCTD7229.SALINITY": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + }, + "SATCTD7229.sea_water_absolute_salinity": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + }, + "SATCTD7229.sea_water_density": { + "colorBarMinimum": 20, + "colorBarMaximum": 28 + }, + "SATCTD7229.sea_water_electrical_conductivity": { + "colorBarMinimum": 30, + "colorBarMaximum": 40 + }, + "SATCTD7229.sea_water_practical_salinity": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + }, + "SATCTD7229.sea_water_salinity": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + }, + "SATMPR0078.sea_water_absolute_salinity": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + }, + "SATMPR0078.sea_water_density": { + "colorBarMinimum": 20, + "colorBarMaximum": 28 + }, + "SATMPR0078.sea_water_electrical_conductivity": { + "colorBarMinimum": 30, + "colorBarMaximum": 40 + }, + 
"SATMPR0078.sea_water_practical_salinity": { + "colorBarMinimum": 32, + "colorBarMaximum": 37, + "units": "PSU" + } + } +} diff --git a/ERDDAP_config_tools/templates/bbts.xml b/ERDDAP_config_tools/templates/bbts.xml new file mode 100644 index 0000000000000000000000000000000000000000..1e168ac584aa2e19e67ce8c2f1a9d737e88c900a --- /dev/null +++ b/ERDDAP_config_tools/templates/bbts.xml @@ -0,0 +1,53 @@ +<dataset type="EDDTableFromMultidimNcFiles" datasetID="{{ datasetID }}" active="true"> + <reloadEveryNMinutes>{{ reloadEveryNMinutes }}</reloadEveryNMinutes> + <updateEveryNMillis>{{ updateEveryNMillis }}</updateEveryNMillis> + <fileDir>{{ fileDir }}</fileDir> + <fileNameRegex>{{ file_prefix }}_.*\.nc</fileNameRegex> + <recursive>true</recursive> + <pathRegex>.*</pathRegex> + <metadataFrom>last</metadataFrom> + <preExtractRegex>^[A-Za-z0-9]*_{{ platform_name }}_</preExtractRegex> + <postExtractRegex>_d_A[A-F]_{{ proc_level }}\.nc$</postExtractRegex> + <extractRegex>.*</extractRegex> + <columnNameForExtract>sampling_day</columnNameForExtract> + <removeMVRows>{{ remove_mv_rows }}</removeMVRows> + <sortFilesBySourceNames>sampling_day</sortFilesBySourceNames> + <fileTableInMemory>false</fileTableInMemory> + <accessibleViaFiles>true</accessibleViaFiles> + + + + <addAttributes> + <att name="Conventions">CF-1.6, COARDS, ACDD-1.3</att> + <att name="history">Observational data from an undersea listening station</att> + <att name="infoUrl">{{ infoUrl }}</att> + <att name="keywords">{{ keywords }}</att> + <att name="Metadata_Conventions">null</att> + <att name="sourceUrl">(local files)</att> + <att name="summary">{{ summary }}</att> + <att name="title">{{ title }}</att> + <att name="cdm_timeseries_variables">latitude, longitude</att> + </addAttributes> + {% for item in deployment_variables %} + <dataVariable> + {% if item.sourceName %} + <sourceName>{{ item.sourceName }}</sourceName> + {% else %} + <sourceName>{{ item.instrument }}\.{{ item.sensor }}</sourceName> + {% endif %} + 
<destinationName>{{ item.destinationName }}</destinationName> + <dataType>{{ item.dataType }}</dataType> + <addAttributes> + {% if item.addAttributes %} + {% for key, value in item.addAttributes.items() %} + {% if key == "long_name" and item.standard %} + <att name="{{ key }}">{{ item.instrument }}\.{{ value }}</att> + {% else %} + <att name="{{ key }}">{{ value }}</att> + {% endif %} + {% endfor %} + {% endif %} + </addAttributes> + </dataVariable> + {% endfor %} +</dataset> diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..755181e4159a07fd851023fc16f22993e2d116c1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include ERDDAP_config_tools * diff --git a/environment.yml b/environment.yml index 86c1f6ba167be842ef39851b0b36e89fcf2c3a9c..942b86aa0a6210aaa733c34a88619a949fcc4a9a 100644 --- a/environment.yml +++ b/environment.yml @@ -5,6 +5,6 @@ dependencies: - python - netCDF4 - jinja2 - - pip: - git+ssh://git@gitlab.oceantrack.org/ceotr/app_common.git + diff --git a/setup.py b/setup.py index 8400fedd43a07726f29a7715865d577e8836be48..b9c7493b3cc125163bba0120eba3aa3b7f8a832d 100644 --- a/setup.py +++ b/setup.py @@ -5,9 +5,13 @@ setup(name="ST_ERDDAP_gen", description="Generate ERDDAP datasets.xml configs based on NetCDF files and Sensor Tracker metadata.", author="Xiang Ling", author_email="", - url="https://gitlab.oceantrack.org/otn-glider-group/ST_ERDDAP_gen", + url="https://gitlab.oceantrack.org/ceotr/gliders/ST_ERDDAP_gen", packages=find_packages(exclude=['tests']), python_requires='>=3.5', - install_requires=[], + install_requires=[ + "netCDF4", + "jinja2" + ], + include_package_data=True, zip_safe=True )