Commit af5a1edc authored by Alex Nunes's avatar Alex Nunes

Merge branch 'master' into 'dev'

Sim tool to dev

See merge request otndc/resonate!21
parents bf7838a3 e48d44eb
......@@ -4,10 +4,34 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [1.0.3]
### Added
### Fixed
- Correct latitudes and longitudes are now coming back from `compressed_detections`
### Changed
- All analysis and documentation now uses seconds and not minutes as a timedelta unit
## [1.0.2]
### Changed
- All analysis and documentation now uses seconds and not minutes as a timedelta unit
## [1.0.1]
### Fixed
- Minor bug and install fixes
### Changed
- Visual Timeline is now a Plotly Animation
## [1.0.0]
### Removed
\ No newline at end of file
### Added
- REI tool added
- Distance Filter
- Velocity Filter
- Updated Documentation
### Changed
- Visual Timeline is now a Plotly Animation
......@@ -45,7 +45,7 @@ OTN has developed a tool which will assist with filtering false detections. The
This is a very simple tool. It will take an input file of detections and based on an input parameter will identify suspect detections. The suspect detections will be put into a dataframe which the user can examine. There will be enough information for each suspect detection for the user to understand why it was flagged. There is also enough information to be able to reference the detection in the original file if the user wants to see what was happening at the same time.
The input parameter is a time in minutes. We used 60 minutes as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx minutes since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.
The input parameter is a time in seconds. We used 3600 seconds as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx seconds since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.
Distance Matrix
......
......@@ -59,7 +59,7 @@ OTN has developed a tool which will assist with filtering false detections. The
This is a very simple tool. It will take an input file of detections and based on an input parameter will identify suspect detections. The suspect detections will be put into a dataframe which the user can examine. There will be enough information for each suspect detection for the user to understand why it was flagged. There is also enough information to be able to reference the detection in the original file if the user wants to see what was happening at the same time.
The input parameter is a time in minutes. We used 60 minutes as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx minutes since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.
The input parameter is a time in seconds. We used 3600 seconds as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx seconds since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.
Distance Matrix
......
#!/bin/bash
twine upload --repository-url https://test.pypi.org/legacy/ dist/*;
twine upload dist/*;
python setup.py sdist
twine upload --skip-existing --repository-url https://test.pypi.org/legacy/ dist/*;
twine upload --skip-existing dist/*;
rm -f ~/anaconda3/conda-bld/osx-64/resonate*;
conda-build conda.recipe &&
rm -rf conda-dist &&
......
package:
name: resonate
version: "1.0.0"
version: "1.0.3"
source:
git_rev: master
......
......@@ -11,15 +11,18 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from nbconvert import RSTExporter
import os
import sys
import nbformat
from nbconvert import RSTExporter
rst_exporter = RSTExporter()
for file in os.listdir("../py_notebooks"):
if file.endswith(".ipynb"):
rst, rest = rst_exporter.from_notebook_node(nbformat.read('../py_notebooks/'+file, as_version=4), wrap ='preserve')
f=open('../docs/notebooks/'+str(file)+'.rst', 'w')
rst, rest = rst_exporter.from_notebook_node(nbformat.read(
'../py_notebooks/' + file, as_version=4), wrap='preserve')
f = open('../docs/notebooks/' + str(file) + '.rst', 'w')
rst = rst.replace('raw-latex', 'math')
rst = rst.replace('$', '')
rst = rst.replace('ipython2', 'python')
......@@ -33,7 +36,8 @@ for file in os.listdir("../py_notebooks"):
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath('.')), 'resonate'))
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath('.')), 'resonate'))
def setup(app):
......@@ -72,9 +76,9 @@ copyright = u'2017 Ocean Tracking Network. All Rights Reserved.'
# built documents.
#
# The short X.Y version.
version = 'v1.0.0'
version = 'v1.0.3'
# The full version, including alpha/beta/rc tags.
release = 'v1.0.0'
release = 'v1.0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -147,7 +151,6 @@ html_favicon = '_static/favicon.ico'
html_static_path = ['_static']
# html_context = {
# 'css_files': [
# '_static/full_width_resonate.css', # override wide tables in RTD theme
......@@ -201,21 +204,21 @@ htmlhelp_basename = 'resonate-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'resonate.tex', u'resonate Documentation',
u'Kenneth Reitz', 'manual'),
('index', 'resonate.tex', u'resonate Documentation',
u'Kenneth Reitz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
......@@ -258,9 +261,9 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'resonate', u'resonate Documentation',
u'Kenneth Reitz', 'resonate', 'One line description of project.',
'Miscellaneous'),
('index', 'resonate', u'resonate Documentation',
u'Kenneth Reitz', 'resonate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
......
......@@ -66,7 +66,7 @@ OTN has developed a tool which will assist with filtering false detections. The
This is a very simple tool. It will take an input file of detections and based on an input parameter will identify suspect detections. The suspect detections will be put into a dataframe which the user can examine. There will be enough information for each suspect detection for the user to understand why it was flagged. There is also enough information to be able to reference the detection in the original file if the user wants to see what was happening at the same time.
The input parameter is a time in minutes. We used 60 minutes as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx minutes since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less. Details are in :ref:`Filter Tool <filter_page>`.
The input parameter is a time in seconds. We used 3600 seconds as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx seconds since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less. Details are in :ref:`Filter Tool <filter_page>`.
Two other filtering tools are available as well, one based on distance alone and one based on velocity. They can be found at :ref:`Filter Tools <filter_page>` as well.
......
......@@ -15,7 +15,7 @@ The function returns a dataframe which you can use to help identify
animal cohorts. The cohort is created from the compressed data that is a
result from the ``compress_detections()`` function. Pass the compressed
dataframe into the ``cohort()`` function along with a time interval in
minutes (default is 60) to create the cohort dataframe.
seconds (default is 3600) to create the cohort dataframe.
.. warning::
......@@ -28,7 +28,7 @@ minutes (default is 60) to create the cohort dataframe.
from resonate.compress import compress_detections
import pandas as pd
time_interval = 60
time_interval = 3600 # in seconds
data = pd.read_csv('/path/to/detections.csv')
......
......@@ -25,15 +25,15 @@ the user to understand why it was flagged. There is also enough
information to be able to reference the detection in the original file
if the user wants to see what was happening at the same time.
The input parameter is a time in minutes. We used 60 minutes as the
The input parameter is a time in seconds. We used 3600 seconds as the
default as this is what was used in Easton’s code. This value can be
changed by the user. The output contains a record for each detection for
which there has been more than xx minutes since the previous detection
which there has been more than xx seconds since the previous detection
(of that tag/animal) and more than the same amount of time until the
next detection. It ignores which receiver the detection occurred at.
That is all it does, nothing more and nothing less.
Below the interval is set to 60 minutes and is not using a a user
Below the interval is set to 3600 seconds and is not using a user
specified suspect file. The function will also create a distance matrix.
.. warning::
......@@ -48,7 +48,7 @@ specified suspect file. The function will also create a distance matrix.
detections = pd.read_csv('/path/to/detections.csv')
time_interval = 60 # in Minutes
time_interval = 3600 # in seconds
SuspectFile = None
......
......@@ -68,3 +68,4 @@ file to a desired location.
.. code:: python
interval.to_csv('/path/to/output.csv', index=False)
......@@ -10,8 +10,10 @@ create a context for the detections. Both the amount of unique tags and
number of species are taken into consideration in the calculation.
The receiver efficiency index is implemented based on the
paper [paper place holder]. Each receiver’s index is calculated on the
formula of:
paper `Acoustic telemetry array evolution: From species- and
project-specific designs to large-scale, multispecies, cooperative
networks <https://doi.org/10.1016/j.fishres.2018.09.015>`__. Each
receiver’s index is calculated on the formula of:
.. container:: large-math
......
......@@ -10,7 +10,7 @@
"\n",
"The tool takes a dataframe of compressed detections and a time parameter in seconds. It identifies groups of animals traveling together. Each station an animal visits is checked for other animals detected there within the specified time period. \n",
"\n",
"The function returns a dataframe which you can use to help identify animal cohorts. The cohort is created from the compressed data that is a result from the `compress_detections()` function. Pass the compressed dataframe into the `cohort()` function along with a time interval in minutes (default is 60) to create the cohort dataframe.\n",
"The function returns a dataframe which you can use to help identify animal cohorts. The cohort is created from the compressed data that is a result from the `compress_detections()` function. Pass the compressed dataframe into the `cohort()` function along with a time interval in seconds (default is 3600) to create the cohort dataframe.\n",
"\n",
"<span style=\"color:red\">Warning:</span> \n",
"\n",
......@@ -30,7 +30,7 @@
"from resonate.compress import compress_detections\n",
"import pandas as pd\n",
"\n",
"time_interval = 60\n",
"time_interval = 3600 # in seconds\n",
"\n",
"data = pd.read_csv('/path/to/detections.csv')\n",
"\n",
......@@ -63,23 +63,23 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"display_name": "Python [conda env:anaconda3]",
"language": "python",
"name": "python2"
"name": "conda-env-anaconda3-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.13"
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
"nbformat_minor": 2
}
......@@ -15,9 +15,9 @@
"\n",
"This is a very simple tool. It will take an input file of detections and based on an input parameter will identify suspect detections. The suspect detections will be put into a dataframe which the user can examine. There will be enough information for each suspect detection for the user to understand why it was flagged. There is also enough information to be able to reference the detection in the original file if the user wants to see what was happening at the same time.\n",
"\n",
"The input parameter is a time in minutes. We used 60 minutes as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx minutes since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.\n",
"The input parameter is a time in seconds. We used 3600 seconds as the default as this is what was used in Easton's code. This value can be changed by the user. The output contains a record for each detection for which there has been more than xx seconds since the previous detection (of that tag/animal) and more than the same amount of time until the next detection. It ignores which receiver the detection occurred at. That is all it does, nothing more and nothing less.\n",
"\n",
"Below the interval is set to 60 minutes and is not using a a user specified suspect file. The function will also create a distance matrix.\n",
"Below the interval is set to 3600 seconds and is not using a user specified suspect file. The function will also create a distance matrix.\n",
"\n",
"<span style=\"color:red\">Warning:</span> \n",
"\n",
......@@ -36,7 +36,7 @@
"\n",
"detections = pd.read_csv('/path/to/detections.csv')\n",
"\n",
"time_interval = 60 # in Minutes\n",
"time_interval = 3600 # in seconds\n",
"\n",
"SuspectFile = None\n",
"\n",
......@@ -211,5 +211,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 1
"nbformat_minor": 2
}
......@@ -10,7 +10,7 @@
"The function takes a set detections and a deployment history of the receivers to create a context for the detections. Both the amount of unique tags and number of species are taken into \n",
"consideration in the calculation.\n",
"\n",
"The receiver efficiency index implement is implemented based on the paper [paper place holder]. Each receiver's index is calculated on the formula of:\n",
"The receiver efficiency index is implemented based on the paper [Acoustic telemetry array evolution: From species- and project-specific designs to large-scale, multispecies, cooperative networks](https://doi.org/10.1016/j.fishres.2018.09.015). Each receiver's index is calculated on the formula of:\n",
"\n",
"\n",
"<br/>\n",
......@@ -66,7 +66,7 @@
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [default]",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
......@@ -80,7 +80,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.6.7"
},
"varInspector": {
"cols": {
......
......@@ -386,7 +386,7 @@
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [default]",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
......@@ -400,7 +400,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
"version": "3.6.7"
},
"varInspector": {
"cols": {
......@@ -433,5 +433,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 1
"nbformat_minor": 2
}
__author__ = "Alex Nunes"
__credits__ = ["Alex Nunes", "Jon Pye", "Brian Jones", "Marta Mihoff"]
__license__ = "GPL"
__version__ = "1.0.0"
__version__ = "1.0.3"
__maintainer__ = "Alex Nunes"
__email__ = "anunes@dal.ca"
__status__ = "Production"
import pandas as pd
def cohort(compressed_df, interval_time=60):
def cohort(compressed_df, interval_time=3600):
"""
Creates a dataframe of cohorts using a compressed detection file
:param compressed_df: compressed dataframe
:param interval_time: cohort detection time interval (in minutes)
:param interval_time: cohort detection time interval (in seconds)
:return: cohort dataframe with the following columns
* anml_1
......@@ -23,7 +23,7 @@ def cohort(compressed_df, interval_time=60):
"""
# Convert input int interval_time into a timedelta object
interval_time = pd.to_timedelta(interval_time, unit='m')
interval_time = pd.to_timedelta(interval_time, unit='s')
# Sort input compressed data file
cmps = compressed_df.sort_values(['catalognumber', 'seq_num'])
......
......@@ -21,7 +21,7 @@ def compress_detections(detections, timefilter=3600):
['datecollected', 'catalognumber', 'unqdetecid', 'latitude', 'longitude'])
if mandatory_columns.issubset(detections.columns):
stations = detections.groupby('catalognumber').agg(
stations = detections.groupby('station').agg(
'mean')[['latitude', 'longitude']].reset_index()
# Get unique list of animals (not tags), set indices to respect animal and date of detections
......@@ -70,7 +70,7 @@ def compress_detections(detections, timefilter=3600):
out_df = out_df[['catalognumber', 'station', 'seq_num']].drop_duplicates(
).merge(stat_df, on=['catalognumber', 'seq_num'])
out_df = out_df.merge(stations, on='catalognumber')
out_df = out_df.merge(stations, on='station')
return out_df
else:
......
This diff is collapsed.
......@@ -12,7 +12,7 @@ with open('LICENSE') as f:
setup(
name='resonATe',
version='1.0.0',
version='1.0.3',
description='resonate data analysis package',
long_description=readme,
author='Alex Nunes',
......
# -*- coding: utf-8 -*-
from resonate.cohorts import cohort
from resonate.compress import compress_detections
import unittest
import pandas as pd
import pandas.testing as pt
from colorama import Fore as c
from resonate.cohorts import cohort
from resonate.compress import compress_detections
class CohortTest(unittest.TestCase):
def test_cohort(self):
print(c.YELLOW+'Testing Cohort...'+c.RESET)
compressed = compress_detections(pd.read_csv('tests/assertion_files/nsbs.csv'))
dfa = cohort(compressed, 60)
print(c.YELLOW + 'Testing Cohort...' + c.RESET)
compressed = compress_detections(
pd.read_csv('tests/assertion_files/nsbs.csv'))
dfa = cohort(compressed, 3600)
dfb = pd.read_csv('tests/assertion_files/nsbs_cohort_60min.csv')
dfb.anml_2_arrive = pd.to_datetime(dfb.anml_2_arrive)
dfb.anml_2_depart = pd.to_datetime(dfb.anml_2_depart)
pt.assert_frame_equal(dfa, dfb)
print(c.GREEN+'OK!\n'+c.RESET)
print(c.GREEN + 'OK!\n' + c.RESET)
if __name__ == '__main__':
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment